diff --git a/README.en.md b/README.en.md index e70b61f..8b58793 100644 --- a/README.en.md +++ b/README.en.md @@ -14,9 +14,13 @@ A high-efficiency MySQL physical backup and OSS upload tool. Supports Percona Xt - **Percona XtraBackup**: For MySQL physical backup - [Download](https://www.percona.com/downloads/Percona-XtraBackup-LATEST/) - Ensure `xtrabackup` is in your PATH +- **MySQL Server Connection**: The tool connects to MySQL server via TCP/IP protocol + - No need to install `mysql` command-line client tool + - No need for local `mysqld` or socket files + - Only requires TCP/IP connectivity to MySQL server (host:port) ### Optional -- **zstd**: For zstd compression (when using `--compress-type=zstd`) +- **zstd**: For zstd compression (when using `--compress=zstd`) - [Download](https://github.com/facebook/zstd) - Ensure `zstd` is in your PATH @@ -34,28 +38,57 @@ A high-efficiency MySQL physical backup and OSS upload tool. Supports Percona Xt "objectName": "backup/your-backup", // Only prefix needed, timestamp and suffix are auto-appended "size": 104857600, "buffer": 10, - "traffic": 209715200, "mysqlHost": "127.0.0.1", "mysqlPort": 3306, "mysqlUser": "root", "mysqlPassword": "your-mysql-password", - "compress": true, + "compressType": "zstd", "mode": "oss", "streamPort": 9999, + "streamHost": "", + "mysqlVersion": { + "major": 5, + "minor": 7, + "patch": 0 + }, + "qwenAPIKey": "your-qwen-api-key", "enableHandshake": false, "streamKey": "your-secret-key", "existedBackup": "", "logDir": "/var/log/mysql-backup-helper", "estimatedSize": 0, - "ioLimit": 0 + "ioLimit": 0, + "downloadOutput": "", + "remoteOutput": "" } ``` - **objectName**: Only specify the prefix. The final OSS object will be `objectName_YYYYMMDDHHMM`, e.g. `backup/your-backup_202507181648.xb.zst` +- **compressType**: Compression type, options: `zstd`, `qp` (qpress), or empty string/`no` (no compression). Supported in all modes (oss, stream) +- **streamPort**: Streaming port, set to `0` to auto-find available port +- **streamHost**: Remote host IP for active push mode - **existedBackup**: Path to existing backup file for upload or streaming (use '-' for stdin) - **logDir**: Log file storage directory, defaults to `/var/log/mysql-backup-helper`, supports both relative and absolute paths +- **downloadOutput**: Default output path for download mode +- **remoteOutput**: Remote save path for SSH mode +- **ioLimit**: IO bandwidth limit (bytes per second), set to `0` to use default (200MB/s), set to `-1` for unlimited speed +- **parallel**: Number of parallel threads (default: 4), used for xtrabackup backup, compression, decompression, and xbstream extraction operations +- **useMemory**: Memory to use for prepare operation (default: 1G), supports units (e.g., '1G', '512M') +- **xtrabackupPath**: Path to xtrabackup binary or directory containing xtrabackup/xbstream. Priority: command-line flag > config file > environment variable `XTRABACKUP_PATH` > PATH lookup - All config fields can be overridden by command-line arguments. Command-line arguments take precedence over config. +**Note**: The tool automatically handles the following xtrabackup options without user configuration: +- `--defaults-file`: Can be manually specified via `--defaults-file` parameter for MySQL config file path (my.cnf). 
If not specified, no auto-detection is performed to avoid using wrong config files
+- `--close-files=1`: Automatically enabled to handle a large number of tables
+- File descriptor limit: Automatically set to 655360 (via ulimit)
+
+**Compatibility Notes**:
+- The tool supports a wide range of xtrabackup/xbstream versions, including older versions that don't support the `--version` flag (e.g., xbstream 2.4.12)
+- The tool uses multiple fallback methods to verify binary executability (`--version` → `-h` → `--help` → run without arguments)
+- `--prepare` mode does not require xbstream, only xtrabackup is needed
+- The tool does not depend on the `mysql` command-line client; it connects directly to the MySQL server via the Go MySQL driver
+- When resolving the config file path, if MySQL variables cannot be queried (e.g., insufficient permissions), it gracefully falls back to checking common paths
+
---

## Command-line Arguments

@@ -68,19 +101,30 @@ A high-efficiency MySQL physical backup and OSS upload tool. Supports Percona Xt
| --user | MySQL username (overrides config) |
| --password | MySQL password (overrides config, prompt if omitted) |
| --backup | Run backup (otherwise only checks parameters) |
+| --check | Pre-check mode: perform pre-flight validation. Can be used alone (check all modes) or combined with other modes (e.g., `--check --backup` checks backup mode only) |
| --download | Download mode: receive backup data from TCP stream and save |
+| --prepare | Prepare mode: execute xtrabackup --prepare to make backup ready for restore |
| --output | Output file path for download mode (use '-' for stdout, default: backup_YYYYMMDDHHMMSS.xb) |
-| --mode | Backup mode: `oss` (upload to OSS) or `stream` (push to TCP) |
-| --stream-port | Local port for streaming mode (e.g. 9999, 0 = auto-find available port) |
-| --compress | Enable compression |
-| --compress-type | Compression type: `qp` (qpress), `zstd` |
+| --target-dir | Directory: extraction directory for download mode, backup directory for prepare mode |
+| --mode | Backup mode: `oss` (upload to OSS) or `stream` (push to TCP, default) |
+| --log-file | Custom log file name (relative to logDir or absolute path). If not specified, auto-generates `backup-helper-{timestamp}.log` |
+| --stream-port | Local port for streaming mode (e.g. 9999, 0 = auto-find available port), or remote port when --stream-host is specified |
+| --stream-host | Remote host IP (e.g., '192.168.1.100'). When specified, actively connects to remote server to push data, similar to `nc host port` |
+| --ssh | Use SSH to automatically start receiver on remote host (requires --stream-host, relies on system SSH config) |
+| --remote-output | Remote output path for SSH mode (default: auto-generated) |
+| --compress | Compression: `qp` (qpress), `zstd`, or `no` (no compression). Defaults to qp when no value provided. Supported in all modes (oss, stream) |
| --lang | Language: `zh` (Chinese) or `en` (English), auto-detect if unset |
-| --ai-diagnose=on/off| AI diagnosis on backup failure. 'on' runs automatically (requires Qwen API Key in config), 'off' skips, unset will prompt interactively. |
+| --ai-diagnose=on/off| AI diagnosis on operation failure. 'on' prompts user whether to run diagnosis (use with -y to skip prompt and run directly), 'off' skips, unset defaults to 'off' (no diagnosis). Supports all modules (BACKUP, PREPARE, TCP, OSS, EXTRACT, etc.). 
| | --enable-handshake | Enable handshake for TCP streaming (default: false, can be set in config) | | --stream-key | Handshake key for TCP streaming (default: empty, can be set in config) | | --existed-backup | Path to existing xtrabackup backup file to upload or stream (use '-' for stdin) | | --estimated-size | Estimated backup size with units (e.g., '100MB', '1GB') or bytes (for progress tracking) | | --io-limit | IO bandwidth limit with units (e.g., '100MB/s', '1GB/s') or bytes per second. Use -1 for unlimited speed | +| --parallel | Number of parallel threads (default: 4), used for xtrabackup backup (--parallel), qpress compression (--compress-threads), zstd compression/decompression (-T), xbstream extraction (--parallel), and xtrabackup decompression (--parallel) | +| --use-memory | Memory to use for prepare operation (e.g., '1G', '512M'). Default: 1G | +| --defaults-file | Path to MySQL configuration file (my.cnf). If not specified, no auto-detection is performed and --defaults-file will not be passed to xtrabackup | +| --xtrabackup-path | Path to xtrabackup binary or directory containing xtrabackup/xbstream (overrides config file and environment variable) | +| -y, --yes | Non-interactive mode: automatically answer 'yes' to all prompts (including directory overwrite confirmation and AI diagnosis confirmation) | | --version, -v | Show version information | --- @@ -108,8 +152,10 @@ go build -a -o backup-helper main.go ### 4. Specify compression type ```sh -./backup-helper --config config.json --backup --mode=oss --compress-type=zstd -./backup-helper --config config.json --backup --mode=oss --compress-type=qp +./backup-helper --config config.json --backup --mode=oss --compress=zstd +./backup-helper --config config.json --backup --mode=oss --compress=qp +./backup-helper --config config.json --backup --mode=oss --compress=no +./backup-helper --config config.json --backup --mode=oss --compress ``` ### 5. Streaming mode @@ -134,26 +180,107 @@ nc 192.168.1.100 54321 > streamed-backup.xb - **In stream mode, all compression options are ignored; the backup is always sent as a raw physical stream.** - **When auto-finding ports, the program automatically obtains and displays the local IP in the output, making remote connections easy.** +- **Use `--stream-host` to actively push to a remote server; the receiver side uses `--download --stream-port` to listen on the specified port.** + +### 5.2. Actively push to remote server + +```sh +# Sender side: actively connect to remote server and push data +./backup-helper --config config.json --backup --mode=stream --stream-host=192.168.1.100 --stream-port=9999 + +# Receiver side: listen and receive data on remote server +./backup-helper --download --stream-port=9999 +``` + +This achieves similar functionality to `xtrabackup | nc 192.168.1.100 9999`. + +### 5.3. 
SSH Mode: Automatically start receiver on remote host + +If you have SSH access, you can use `--ssh` to automatically start the receiver on the remote host: + +```sh +# SSH mode + auto-discover port (recommended) +./backup-helper --config config.json --backup --mode=stream \ + --stream-host=replica-server \ + --ssh \ + --remote-output=/backup/mysql_backup.xb \ + --estimated-size=10GB + +# SSH mode + specified port +./backup-helper --config config.json --backup --mode=stream \ + --stream-host=replica-server \ + --ssh \ + --stream-port=9999 \ + --remote-output=/backup/mysql_backup.xb + +# Traditional mode: requires manually running receiver on remote +./backup-helper --config config.json --backup --mode=stream \ + --stream-host=replica-server \ + --stream-port=9999 +``` + +**SSH Mode Notes:** +- When using `--ssh`, the program automatically executes `backup-helper --download` on the remote host via SSH +- Relies on existing SSH configuration (`~/.ssh/config`, keys, etc.), no additional setup needed +- If `--stream-port` is specified, starts service on that port; otherwise auto-discovers available port +- Automatically cleans up remote process after transfer completes +- Similar to `rsync -e ssh` usage - if SSH keys are configured, it just works -### 6. Parameter check only (no backup) +### 6. Pre-check Mode (--check) + +The `--check` mode can be used alone or combined with other modes: + +```sh +# Use alone: check all modes (BACKUP, DOWNLOAD, PREPARE) +./backup-helper --check + +# Check all modes (including MySQL compatibility checks) +./backup-helper --check --host=127.0.0.1 --user=root --password=yourpass --port=3306 + +# Check backup mode only (does not execute backup) +./backup-helper --check --backup --host=127.0.0.1 --user=root --password=yourpass + +# Check download mode only (does not execute download) +./backup-helper --check --download --target-dir=/path/to/extract + +# Check prepare mode only (does not execute prepare) +./backup-helper --check --prepare --target-dir=/path/to/backup + +# Check with compression type specified +./backup-helper --check --compress=zstd --host=127.0.0.1 --user=root --password=yourpass +``` + +**Check Contents:** +- **Dependency Checks**: Verify if xtrabackup, xbstream, zstd, qpress tools are installed +- **MySQL Compatibility Checks** (backup mode): MySQL version, xtrabackup version compatibility, data size estimation, replication parameters, config file validation +- **System Resource Checks** (when using --check alone): CPU cores, memory size, network interfaces +- **Parameter Recommendations** (backup mode): Recommend parallel, io-limit, use-memory parameters based on system resources +- **Target Directory Checks** (download/prepare modes): Verify directory existence, writability, backup file presence, etc. + +**Important Notes:** +- When using `--backup`, `--download`, or `--prepare`, the tool automatically performs pre-flight checks before execution +- If pre-flight checks find critical issues (ERROR), the tool will stop and prompt you to fix them +- When using `--check` combined with a mode (e.g., `--check --backup`), only checks are performed, no actual operations are executed + +### 7. Parameter check only (no backup) ```sh ./backup-helper --config config.json ``` -### 7. All command-line (no config.json) +### 8. 
All command-line (no config.json)

```sh
-./backup-helper --host=127.0.0.1 --user=root --password=123456 --port=3306 --backup --mode=oss --compress-type=qp
+./backup-helper --host=127.0.0.1 --user=root --password=123456 --port=3306 --backup --mode=oss --compress=qp
```

-### 8. Upload existing backup file to OSS
+### 9. Upload existing backup file to OSS

```sh
./backup-helper --config config.json --existed-backup backup.xb --mode=oss
```

-### 9. Stream existing backup file via TCP
+### 10. Stream existing backup file via TCP

```sh
./backup-helper --config config.json --existed-backup backup.xb --mode=stream --stream-port=9999
@@ -161,33 +288,33 @@ nc 192.168.1.100 54321 > streamed-backup.xb
nc 127.0.0.1 9999 > streamed-backup.xb
```

-### 10. Use cat command to read from stdin and upload to OSS
+### 11. Use cat command to read from stdin and upload to OSS

```sh
cat backup.xb | ./backup-helper --config config.json --existed-backup - --mode=oss
```

-### 11. Use cat command to read from stdin and stream via TCP
+### 12. Use cat command to read from stdin and stream via TCP

```sh
cat backup.xb | ./backup-helper --config config.json --existed-backup - --mode=stream --stream-port=9999
```

-### 12. Manually specify upload rate limit (e.g., limit to 100 MB/s)
+### 13. Manually specify upload rate limit (e.g., limit to 100 MB/s)

```sh
./backup-helper --config config.json --backup --mode=oss --io-limit 100MB/s

# Supports units: KB/s, MB/s, GB/s, TB/s, or use bytes per second directly
```

-### 13. Disable rate limiting (unlimited upload speed)
+### 14. Disable rate limiting (unlimited upload speed)

```sh
./backup-helper --config config.json --backup --mode=oss --io-limit -1

# Use -1 to completely disable rate limiting for maximum upload speed
```

-### 14. Specify estimated size for accurate progress display
+### 15. Specify estimated size for accurate progress display

```sh
./backup-helper --config config.json --backup --mode=oss --estimated-size 1GB
@@ -195,7 +322,31 @@ cat backup.xb | ./backup-helper --config config.json --existed-backup - --mode=s
# Example: --estimated-size 1073741824 or --estimated-size 1GB
```

-### 15. Download mode: Receive backup data from TCP stream
+### 16. Prepare backup (Prepare Mode)
+
+After backup is complete, execute prepare to make the backup ready for restore:
+
+```sh
+# Basic usage
+./backup-helper --prepare --target-dir=/path/to/backup
+
+# Specify parallel threads and memory size
+./backup-helper --prepare --target-dir=/path/to/backup --parallel=8 --use-memory=2G
+
+# Use config file
+./backup-helper --config config.json --prepare --target-dir=/path/to/backup
+
+# Optional: Provide MySQL connection info and --defaults-file
+./backup-helper --prepare --target-dir=/path/to/backup --host=127.0.0.1 --user=root --port=3306 --defaults-file=/etc/my.cnf
+```
+
+**Notes**:
+- `--target-dir`: Required, specifies the backup directory to prepare
+- `--parallel`: Number of parallel threads, default 4 (can be set in config file or command line)
+- `--use-memory`: Memory to use for prepare operation, default 1G (supports units: G, M, K)
+- `--defaults-file`: Optional, manually specify MySQL config file path (if not specified, no auto-detection is performed)
+
+### 17. 
Download mode: Receive backup data from TCP stream ```sh # Download to default file (backup_YYYYMMDDHHMMSS.xb) @@ -207,11 +358,20 @@ cat backup.xb | ./backup-helper --config config.json --existed-backup - --mode=s # Stream to stdout (can be used with pipes for compression or extraction) ./backup-helper --download --stream-port 9999 --output - | zstd -d > backup.xb -# Direct extraction using xbstream +# Direct extraction using xbstream (uncompressed backup) ./backup-helper --download --stream-port 9999 --output - | xbstream -x -C /path/to/extract/dir -# If backup is compressed, decompress first then extract -./backup-helper --download --stream-port 9999 --output - | xbstream -x -C /path/to/extract/dir --decompress --decompress-threads=4 +# Zstd compressed backup: stream decompress then extract (recommended) +./backup-helper --download --stream-port 9999 --compress=zstd --target-dir /path/to/extract/dir + +# Zstd compressed backup: stream to stdout (can be piped to xbstream) +./backup-helper --download --stream-port 9999 --compress=zstd --output - | xbstream -x -C /path/to/extract/dir + +# Qpress compressed backup: auto decompress and extract (note: requires saving to file first, no stream decompression) +./backup-helper --download --stream-port 9999 --compress=qp --target-dir /path/to/extract/dir + +# Save zstd compressed backup (auto decompress) +./backup-helper --download --stream-port 9999 --compress=zstd --output my_backup.xb # Download with rate limiting ./backup-helper --download --stream-port 9999 --io-limit 100MB/s @@ -219,18 +379,83 @@ cat backup.xb | ./backup-helper --config config.json --existed-backup - --mode=s # Download with progress display (requires estimated size) ./backup-helper --download --stream-port 9999 --estimated-size 1GB +# Non-interactive mode: automatically confirm all prompts +./backup-helper --download --stream-port 9999 --target-dir /backup/mysql --compress=zstd -y +``` + +**Note**: +- If the directory specified by `--target-dir` already exists and is not empty, the program will prompt you to confirm overwriting existing files +- Enter `y` or `yes` to continue extraction (may overwrite existing files) +- Enter `n` or any other value to cancel extraction and exit +- Use `-y` or `--yes` flag to automatically confirm all prompts (non-interactive mode), suitable for scripts and automation scenarios + +**Download mode compression type notes:** + +- **Zstd compression (`--compress=zstd`)**: + - Supports stream decompression, can directly decompress and extract to directory + - When using `--target-dir`, automatically executes `zstd -d | xbstream -x` + - When using `--output -`, outputs decompressed stream that can be piped to `xbstream` + +- **Qpress compression (`--compress=qp` or `--compress`)**: + - **Does not support stream decompression** (xbstream in MySQL 5.7 does not support `--decompress` in stream mode) + - When using `--target-dir`, saves compressed file first, then uses `xbstream -x` to extract, finally uses `xtrabackup --decompress` to decompress + - When using `--output -`, warns and outputs raw compressed stream + +- **Uncompressed backup**: + - When `--compress` is not specified, saves or extracts directly + - When using `--target-dir`, directly uses `xbstream -x` to extract + --- ## Logging & Object Naming -- All backup logs are saved in the `logs/` directory, only the latest 10 logs are kept. 
+### Unified Logging System
+
+The tool uses a unified logging system that records all critical operations into a single log file:
+
+- **Log File Naming**: Defaults to auto-generated `backup-helper-{timestamp}.log` (e.g., `backup-helper-20251106105903.log`), can be customized via `--log-file` or the `logFileName` field in the config file (supports both relative and absolute paths)
+- **Log Storage Location**: Defaults to `/var/log/mysql-backup-helper`, can be customized via the `logDir` field in the config file (supports both relative and absolute paths)
+- **Log Content**: Unified recording of all operation steps
+  - **[BACKUP]**: xtrabackup backup operations
+  - **[PREPARE]**: xtrabackup prepare operations
+  - **[TCP]**: TCP stream transfers (send/receive)
+  - **[OSS]**: OSS upload operations
+  - **[XBSTREAM]**: xbstream extraction operations
+  - **[DECOMPRESS]**: Decompression operations (zstd/qpress)
+  - **[EXTRACT]**: Extraction operations
+  - **[SYSTEM]**: System-level logs
+
+- **Log Format**: Each log entry includes a timestamp and module prefix, format: `[YYYY-MM-DD HH:MM:SS] [MODULE] message content`
+- **Log Cleanup**: Automatically cleans old logs, keeping only the latest 10 log files
+- **Error Handling**:
+  - On operation completion or failure, displays the log file location in the console
+  - On failure, automatically extracts an error summary and displays it in the console
+  - All modules support AI diagnosis (requires Qwen API Key configuration)
+  - **Connection Interruption Detection**: Automatically detects TCP connection interruptions, abnormal process terminations, etc., logs them to file, and aborts the process to avoid processing incomplete data
+
+Example log content:
+```
+[2025-11-06 10:59:03] [SYSTEM] === MySQL Backup Helper Log Started ===
+[2025-11-06 10:59:03] [SYSTEM] Timestamp: 2025-11-06 10:59:03
+[2025-11-06 10:59:03] [BACKUP] Starting backup operation
+[2025-11-06 10:59:03] [BACKUP] Command: xtrabackup --backup --stream=xbstream ...
+[2025-11-06 10:59:03] [TCP] Listening on 192.168.1.100:9999
+[2025-11-06 10:59:03] [TCP] Client connected
+[2025-11-06 10:59:03] [TCP] Transfer started
+```
+
+### OSS Object Naming
+
- OSS object names are auto-appended with a timestamp, e.g. `backup/your-backup_202507181648.xb.zst`, for easy archiving and lookup. 
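+
+A quick illustration of both conventions (the file names and timestamps are the examples used above; actual values will differ):
+
+```sh
+# Unified log: one file per run, filterable by module prefix
+grep '\[TCP\]' /var/log/mysql-backup-helper/backup-helper-20251106105903.log
+
+# OSS object name: configured prefix plus auto-appended timestamp and suffix
+# objectName "backup/your-backup" + zstd => backup/your-backup_202507181648.xb.zst
+```
+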
## Progress Tracking The tool displays real-time progress information during backup upload/download: -- **Real-time Progress**: Shows uploaded/downloaded size, total size, percentage, transfer speed, and duration +- **Real-time Progress**: Shows uploaded/downloaded size, total size, percentage (when uncompressed), transfer speed, and duration + - When compression is enabled, percentage is not shown (because compressed size differs from original size) + - When uncompressed: `Progress: 100 MB / 500 MB (20.0%) - 50 MB/s - Duration: 2s` + - When compressed: `Progress: 100 MB - 50 MB/s - Duration: 2s` - **Final Statistics**: Shows total uploaded/downloaded size, duration, and average speed - **Size Calculation**: - If `--estimated-size` is provided, uses that value directly (supports units: KB, MB, GB, TB) @@ -245,9 +470,9 @@ The tool displays real-time progress information during backup upload/download: - Supports units: `KB/s`, `MB/s`, `GB/s`, `TB/s` (e.g., `100MB/s`, `1GB/s`) - Can also use bytes per second directly (e.g., `104857600` for 100 MB/s) - Use `-1` to completely disable rate limiting (unlimited upload speed) -- **Config File**: Can set `ioLimit` field in config file, or use `traffic` field (in bytes per second) +- **Config File**: Can set `ioLimit` field in config file (in bytes per second), can be overridden by `--io-limit` command-line argument -Example output: +Example output (uncompressed): ``` [backup-helper] IO rate limit set to: 100.0 MB/s @@ -259,6 +484,18 @@ Progress: 1.3 GB / 1.5 GB (86.7%) - 99.2 MB/s - Duration: 13.1s Average speed: 102.4 MB/s ``` +Example output (with compression): +``` +[backup-helper] IO rate limit set to: 100.0 MB/s + +Progress: 500 MB - 95.2 MB/s - Duration: 5.2s +Progress: 800 MB - 96.1 MB/s - Duration: 8.3s +[backup-helper] Upload completed! + Total uploaded: 1.0 GB + Duration: 10.5s + Average speed: 97.5 MB/s +``` + --- ## Multi-language Support @@ -273,7 +510,9 @@ Progress: 1.3 GB / 1.5 GB (86.7%) - 99.2 MB/s - Duration: 13.1s - **zstd not installed**: Please install zstd and ensure it is in your PATH. - **OSS upload failed**: Check OSS-related config parameters. - **MySQL connection failed**: Check DB host, port, username, password. -- **Log accumulation**: The program auto-cleans the logs directory, keeping only the latest 10 logs. +- **Log accumulation**: The program auto-cleans the log directory, keeping only the latest 10 log files. +- **Log location**: On operation completion or failure, displays the full path to the log file in the console for troubleshooting. +- **Transfer interruption**: If the connection is interrupted during transfer, the system will automatically detect and log the error, then abort the process. Please check the log file for detailed error information. 
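+
+A few illustrative shell checks can help narrow down the issues above (the host, port, and paths are placeholders; adjust them to your environment):
+
+```sh
+xtrabackup --version                        # confirm xtrabackup is installed and in PATH
+zstd --version                              # confirm zstd is available when using --compress=zstd
+nc -z 127.0.0.1 9999 && echo "port open"    # verify the stream port is reachable (if netcat is installed)
+ls -lt /var/log/mysql-backup-helper | head  # locate the most recent log file
+```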
--- diff --git a/README.md b/README.md index 0f55cd2..2edfc9a 100644 --- a/README.md +++ b/README.md @@ -14,9 +14,13 @@ - **Percona XtraBackup**:用于 MySQL 物理备份 - [下载地址](https://www.percona.com/downloads/Percona-XtraBackup-LATEST/) - 安装后确保 `xtrabackup` 命令在 PATH 中 +- **MySQL 服务器连接**:工具通过 TCP/IP 协议连接 MySQL 服务器 + - 不需要安装 `mysql` 命令行客户端工具 + - 不需要本地 `mysqld` 或 socket 文件 + - 只需要能够通过 TCP/IP 连接到 MySQL 服务器(host:port) ### 可选依赖 -- **zstd**:用于 zstd 压缩(当使用 `--compress-type=zstd` 时) +- **zstd**:用于 zstd 压缩(当使用 `--compress=zstd` 时) - [下载地址](https://github.com/facebook/zstd) - 安装后确保 `zstd` 命令在 PATH 中 @@ -34,28 +38,57 @@ "objectName": "backup/your-backup", // 只需前缀,实际文件名会自动加时间戳和后缀 "size": 104857600, "buffer": 10, - "traffic": 209715200, "mysqlHost": "127.0.0.1", "mysqlPort": 3306, "mysqlUser": "root", "mysqlPassword": "your-mysql-password", - "compress": true, + "compressType": "zstd", "mode": "oss", "streamPort": 9999, + "streamHost": "", + "mysqlVersion": { + "major": 5, + "minor": 7, + "patch": 0 + }, + "qwenAPIKey": "your-qwen-api-key", "enableHandshake": false, "streamKey": "your-secret-key", "existedBackup": "", "logDir": "/var/log/mysql-backup-helper", "estimatedSize": 0, - "ioLimit": 0 + "ioLimit": 0, + "downloadOutput": "", + "remoteOutput": "" } ``` - **objectName**:只需指定前缀,最终 OSS 文件名会自动变为 `objectName_YYYYMMDDHHMM后缀`,如 `backup/your-backup_202507181648.xb.zst` +- **compressType**:压缩类型,可选值:`zstd`、`qp`(qpress)或空字符串/`no`(不压缩)。支持所有模式(oss、stream) +- **streamPort**:流式传输端口,设为 `0` 表示自动查找可用端口 +- **streamHost**:远程主机 IP,用于主动推送模式 - **existedBackup**:已存在的备份文件路径,用于上传或流式传输(使用'-'表示从stdin读取) - **logDir**:日志文件存储目录,默认为 `/var/log/mysql-backup-helper`,支持相对路径和绝对路径 +- **downloadOutput**:下载模式默认输出路径 +- **remoteOutput**:SSH 模式下远程保存路径 +- **ioLimit**:IO 带宽限制(字节/秒),设为 `0` 使用默认值(200MB/s),设为 `-1` 表示不限速 +- **parallel**:并行线程数(默认:4),用于 xtrabackup 备份、压缩、解压缩和 xbstream 解包操作 +- **useMemory**:准备操作使用的内存大小(默认:1G),支持单位(如 '1G', '512M') +- **xtrabackupPath**:xtrabackup 二进制文件路径或包含 xtrabackup/xbstream 的目录路径。优先级:命令行参数 > 配置文件 > 环境变量 `XTRABACKUP_PATH` > PATH 查找 - 其它参数可通过命令行覆盖,命令行参数优先于配置文件。 +**注意**:工具会自动处理以下 xtrabackup 选项,无需用户配置: +- `--defaults-file`:可通过 `--defaults-file` 参数手动指定 MySQL 配置文件路径(my.cnf)。如果不指定,不会自动检测,避免使用错误的配置文件 +- `--close-files=1`:自动启用,用于处理大量表的情况 +- 文件描述符限制:自动设置为 655360(通过 ulimit) + +**兼容性说明**: +- 工具支持广泛的 xtrabackup/xbstream 版本,包括不支持 `--version` 参数的旧版本(如 xbstream 2.4.12) +- 工具会使用多重回退机制验证二进制文件的可执行性(`--version` → `-h` → `--help` → 无参数运行) +- `--prepare` 模式不需要 xbstream,仅需要 xtrabackup +- 工具不依赖 `mysql` 命令行客户端,通过 Go MySQL 驱动直接连接 MySQL 服务器 +- 获取配置文件路径时,如果无法查询 MySQL 变量(如权限不足),会优雅降级到检查常见路径 + --- ## 命令行参数 @@ -68,19 +101,30 @@ | --user | MySQL 用户名(优先于配置文件) | | --password | MySQL 密码(优先于配置文件,未指定则交互输入) | | --backup | 启动备份流程(否则只做参数检查) | +| --check | 预检查模式:执行预检验证。可单独使用(检查所有模式)或与其他模式组合(如 `--check --backup` 只检查备份模式) | | --download | 下载模式:从 TCP 流接收备份数据并保存 | +| --prepare | 准备模式:执行 xtrabackup --prepare 使备份可用于恢复 | | --output | 下载模式输出文件路径(使用 '-' 表示输出到 stdout,默认:backup_YYYYMMDDHHMMSS.xb) | -| --mode | 备份模式:`oss`(上传到 OSS)或 `stream`(推送到 TCP 端口) | -| --stream-port | 流式推送时监听的本地端口(如 9999,设为 0 则自动查找空闲端口) | -| --compress | 启用压缩 | -| --compress-type | 压缩类型:`qp`(qpress)、`zstd` | +| --target-dir | 目录:下载模式用于解包目录,准备模式用于备份目录 | +| --mode | 备份模式:`oss`(上传到 OSS)或 `stream`(推送到 TCP 端口,默认) | +| --log-file | 自定义日志文件名(相对于 logDir 或绝对路径)。如不指定,自动生成 `backup-helper-{timestamp}.log` | +| --stream-port | 流式推送时监听的本地端口(如 9999,设为 0 则自动查找空闲端口),或指定远程端口(当使用 --stream-host 时) | +| --stream-host | 远程主机 IP(如 '192.168.1.100')。指定后主动连接到远程服务器推送数据,类似 `nc host port` | +| --ssh | 使用 SSH 在远程主机自动启动接收服务(需要 
--stream-host,依赖系统 SSH 配置) | +| --remote-output | SSH 模式下远程保存路径(默认:自动生成) | +| --compress | 压缩:`qp`(qpress)、`zstd` 或 `no`(不压缩)。不带值时默认使用 qp。支持所有模式(oss、stream) | | --lang | 语言:`zh`(中文)或 `en`(英文),不指定则自动检测系统语言 | -| --ai-diagnose=on/off| 备份失败时 AI 诊断,on 为自动诊断(需配置 Qwen API Key),off 为跳过,未指定时交互式询问 | +| --ai-diagnose=on/off| 操作失败时 AI 诊断,on 为询问用户是否执行诊断(配合 -y 可跳过询问直接诊断),off 为跳过,未指定时默认为 off(不执行诊断)。支持所有模块(BACKUP、PREPARE、TCP、OSS、EXTRACT等) | | --enable-handshake | TCP流推送启用握手认证(默认false,可在配置文件设置) | | --stream-key | TCP流推送握手密钥(默认空,可在配置文件设置) | | --existed-backup | 已存在的xtrabackup备份文件路径,用于上传或流式传输(使用'-'表示从stdin读取) | | --estimated-size | 预估备份大小,支持单位(如 '100MB', '1GB')或字节(用于进度跟踪) | | --io-limit | IO 带宽限制,支持单位(如 '100MB/s', '1GB/s')或字节/秒,使用 -1 表示不限速 | +| --parallel | 并行线程数(默认:4),用于 xtrabackup 备份(--parallel)、qpress 压缩(--compress-threads)、zstd 压缩/解压缩(-T)、xbstream 解包(--parallel)和 xtrabackup 解压缩(--parallel) | +| --use-memory | 准备操作使用的内存大小(如 '1G', '512M'),默认:1G | +| --defaults-file | MySQL 配置文件路径(my.cnf)。如果不指定,不会自动检测,也不会传递给 xtrabackup | +| --xtrabackup-path | xtrabackup 二进制文件路径或包含 xtrabackup/xbstream 的目录路径(覆盖配置文件和环境变量) | +| -y, --yes | 非交互模式:自动对所有提示回答 'yes'(包括目录覆盖确认和 AI 诊断确认) | | --version, -v | 显示版本信息 | --- @@ -108,9 +152,10 @@ go build -a -o backup-helper main.go ### 4. 指定压缩类型 ```sh -./backup-helper --config config.json --backup --mode=oss --compress-type=zstd -./backup-helper --config config.json --backup --mode=oss --compress-type=qp -./backup-helper --config config.json --backup --mode=oss --compress-type=none +./backup-helper --config config.json --backup --mode=oss --compress=zstd +./backup-helper --config config.json --backup --mode=oss --compress=qp +./backup-helper --config config.json --backup --mode=oss --compress=no +./backup-helper --config config.json --backup --mode=oss --compress ``` ### 5. 流式推送(stream 模式) @@ -135,26 +180,107 @@ nc 192.168.1.100 54321 > streamed-backup.xb - **stream 模式下所有压缩参数均无效,始终为原始物理备份流。** - **自动查找端口时会自动获取本地 IP 并显示在输出中,便于远程连接。** +- **使用 `--stream-host` 可以主动推送到远程服务器,接收端使用 `--download --stream-port` 在指定端口监听。** + +### 5.2. 主动推送到远程服务器 + +```sh +# 发送端:主动连接到远程服务器并推送数据 +./backup-helper --config config.json --backup --mode=stream --stream-host=192.168.1.100 --stream-port=9999 + +# 接收端:在远程服务器上监听并接收数据 +./backup-helper --download --stream-port=9999 +``` -### 6. 仅做参数检查(不备份) +这样可以实现类似 `xtrabackup | nc 192.168.1.100 9999` 的功能。 + +### 5.3. SSH 模式:自动在远程启动接收服务 + +如果有 SSH 权限,可以使用 `--ssh` 选项自动在远程主机启动接收服务,无需手动操作: + +```sh +# SSH 模式 + 自动发现端口(推荐) +./backup-helper --config config.json --backup --mode=stream \ + --stream-host=replica-server \ + --ssh \ + --remote-output=/backup/mysql_backup.xb \ + --estimated-size=10GB + +# SSH 模式 + 指定端口 +./backup-helper --config config.json --backup --mode=stream \ + --stream-host=replica-server \ + --ssh \ + --stream-port=9999 \ + --remote-output=/backup/mysql_backup.xb + +# 传统模式:需要提前在远程运行接收服务 +./backup-helper --config config.json --backup --mode=stream \ + --stream-host=replica-server \ + --stream-port=9999 +``` + +**SSH 模式说明:** +- 使用 `--ssh` 时,程序会通过 SSH 在远程主机自动执行 `backup-helper --download` 命令 +- 依赖系统已有的 SSH 配置(`~/.ssh/config`、密钥等),无需额外配置 +- 如果指定了 `--stream-port`,在远程的该端口启动服务;如果未指定,自动发现可用端口 +- 传输完成后自动清理远程进程 +- 类似 `rsync -e ssh` 的使用方式,如果 SSH 密钥已配置好,直接就能用 + +### 6. 
预检查模式(--check)
+
+`--check` 模式可以单独使用,也可以与其他模式组合使用:
+
+```sh
+# 单独使用:检查所有模式(BACKUP、DOWNLOAD、PREPARE)
+./backup-helper --check
+
+# 检查所有模式(包括 MySQL 兼容性检查)
+./backup-helper --check --host=127.0.0.1 --user=root --password=yourpass --port=3306
+
+# 只检查备份模式(不执行备份)
+./backup-helper --check --backup --host=127.0.0.1 --user=root --password=yourpass
+
+# 只检查下载模式(不执行下载)
+./backup-helper --check --download --target-dir=/path/to/extract
+
+# 只检查准备模式(不执行准备)
+./backup-helper --check --prepare --target-dir=/path/to/backup
+
+# 指定压缩类型进行检查
+./backup-helper --check --compress=zstd --host=127.0.0.1 --user=root --password=yourpass
+```
+
+**检查内容:**
+- **依赖检查**:验证 xtrabackup、xbstream、zstd、qpress 等工具是否已安装
+- **MySQL 兼容性检查**(备份模式):MySQL 版本、xtrabackup 版本兼容性、数据大小估算、复制参数、配置文件验证
+- **系统资源检查**(单独 --check 时):CPU 核心数、内存大小、网络接口
+- **参数推荐**(备份模式):基于系统资源推荐 parallel、io-limit、use-memory 等参数
+- **目标目录检查**(下载/准备模式):验证目录是否存在、可写、包含备份文件等
+
+**重要提示:**
+- 当使用 `--backup`、`--download` 或 `--prepare` 时,工具会在执行前自动进行预检查
+- 如果预检查发现重大问题(ERROR),工具会停止执行并提示修复
+- 使用 `--check` 组合模式(如 `--check --backup`)时,只进行检查,不执行实际操作
+
+### 7. 仅做参数检查(不备份)

```sh
./backup-helper --config config.json
```

-### 7. 纯命令行参数(无 config.json)
+### 8. 纯命令行参数(无 config.json)

```sh
-./backup-helper --host=127.0.0.1 --user=root --password=123456 --port=3306 --backup --mode=oss --compress-type=qp
+./backup-helper --host=127.0.0.1 --user=root --password=123456 --port=3306 --backup --mode=oss --compress=qp
```

-### 8. 上传已存在的备份文件到 OSS
+### 9. 上传已存在的备份文件到 OSS

```sh
./backup-helper --config config.json --existed-backup backup.xb --mode=oss
```

-### 9. 通过 TCP 流式传输已存在的备份文件
+### 10. 通过 TCP 流式传输已存在的备份文件

```sh
./backup-helper --config config.json --existed-backup backup.xb --mode=stream --stream-port=9999
@@ -162,33 +288,33 @@ nc 192.168.1.100 54321 > streamed-backup.xb
nc 127.0.0.1 9999 > streamed-backup.xb
```

-### 10. 使用 cat 命令从 stdin 读取并上传到 OSS
+### 11. 使用 cat 命令从 stdin 读取并上传到 OSS

```sh
cat backup.xb | ./backup-helper --config config.json --existed-backup - --mode=oss
```

-### 11. 使用 cat 命令从 stdin 读取并通过 TCP 传输
+### 12. 使用 cat 命令从 stdin 读取并通过 TCP 传输

```sh
cat backup.xb | ./backup-helper --config config.json --existed-backup - --mode=stream --stream-port=9999
```

-### 12. 手动指定上传限速(如限制到 100 MB/s)
+### 13. 手动指定上传限速(如限制到 100 MB/s)

```sh
./backup-helper --config config.json --backup --mode=oss --io-limit 100MB/s

# 支持单位:KB/s, MB/s, GB/s, TB/s,也可以直接使用字节/秒
```

-### 13. 禁用限速(不限速上传)
+### 14. 禁用限速(不限速上传)

```sh
./backup-helper --config config.json --backup --mode=oss --io-limit -1

# 使用 -1 表示完全禁用限速,以最大速度上传
```

-### 14. 指定预估大小以显示准确的进度
+### 15. 指定预估大小以显示准确的进度

```sh
./backup-helper --config config.json --backup --mode=oss --estimated-size 1GB
@@ -196,7 +322,31 @@ cat backup.xb | ./backup-helper --config config.json --existed-backup - --mode=s
# 例如:--estimated-size 1073741824 或 --estimated-size 1GB
```

-### 15. 下载模式:从 TCP 流接收备份数据
+### 16. 
准备备份(Prepare Mode)
+
+备份完成后,需要执行 prepare 操作使备份可用于恢复:
+
+```sh
+# 基本用法
+./backup-helper --prepare --target-dir=/path/to/backup
+
+# 指定并行线程数和内存大小
+./backup-helper --prepare --target-dir=/path/to/backup --parallel=8 --use-memory=2G
+
+# 使用配置文件
+./backup-helper --config config.json --prepare --target-dir=/path/to/backup
+
+# 可选:提供 MySQL 连接信息和 --defaults-file
+./backup-helper --prepare --target-dir=/path/to/backup --host=127.0.0.1 --user=root --port=3306 --defaults-file=/etc/my.cnf
+```
+
+**说明**:
+- `--target-dir`:必需,指定要准备的备份目录
+- `--parallel`:并行线程数,默认 4(可使用配置文件或在命令行指定)
+- `--use-memory`:准备操作使用的内存大小,默认 1G(支持单位:G, M, K)
+- `--defaults-file`:可选,手动指定 MySQL 配置文件路径(如果不指定,不会自动检测)
+
+### 17. 下载模式:从 TCP 流接收备份数据

```sh
# 下载到默认文件(backup_YYYYMMDDHHMMSS.xb)
@@ -208,31 +358,104 @@ cat backup.xb | ./backup-helper --config config.json --existed-backup - --mode=s
# 流式输出到 stdout(可用于管道压缩或解包)
./backup-helper --download --stream-port 9999 --output - | zstd -d > backup.xb

-# 直接使用 xbstream 解包到目录
+# 直接使用 xbstream 解包到目录(未压缩备份)
./backup-helper --download --stream-port 9999 --output - | xbstream -x -C /path/to/extract/dir

-# 如果备份是压缩的,需要先解压缩再解包
-./backup-helper --download --stream-port 9999 --output - | xbstream -x -C /path/to/extract/dir --decompress --decompress-threads=4
+# Zstd 压缩备份:流式解压后解包(推荐方式)
+./backup-helper --download --stream-port 9999 --compress=zstd --target-dir /path/to/extract/dir
+
+# Zstd 压缩备份:流式输出到 stdout(可用于管道到 xbstream)
+./backup-helper --download --stream-port 9999 --compress=zstd --output - | xbstream -x -C /path/to/extract/dir
+
+# Qpress 压缩备份:自动解压和解包(注意:需要先保存文件,不支持流式解压)
+./backup-helper --download --stream-port 9999 --compress=qp --target-dir /path/to/extract/dir
+
+# 保存 zstd 压缩的备份(自动解压)
+./backup-helper --download --stream-port 9999 --compress=zstd --output my_backup.xb

# 带限速下载
./backup-helper --download --stream-port 9999 --io-limit 100MB/s

# 带进度显示(需要提供预估大小)
./backup-helper --download --stream-port 9999 --estimated-size 1GB
+
+# 非交互模式:自动确认所有提示
+./backup-helper --download --stream-port 9999 --target-dir /backup/mysql --compress=zstd -y
```

+**注意**:
+- 如果 `--target-dir` 指定的目录已存在且不为空,程序会询问是否覆盖现有文件
+- 输入 `y` 或 `yes` 继续提取(可能覆盖现有文件)
+- 输入 `n` 或任何其他值取消提取并退出
+- 使用 `-y` 或 `--yes` 参数可以自动确认所有提示(非交互模式),适合脚本和自动化场景
+
+**下载模式压缩类型说明:**
+
+- **Zstd 压缩(`--compress=zstd`)**:
+  - 支持流式解压,可直接解压并解包到目录
+  - 使用 `--target-dir` 时,自动执行 `zstd -d | xbstream -x`
+  - 使用 `--output -` 时,输出解压后的流,可继续管道到 `xbstream`
+
+- **Qpress 压缩(`--compress=qp` 或 `--compress`)**:
+  - **不支持流式解压**(MySQL 5.7 的 xbstream 不支持 `--decompress` 流式操作)
+  - 使用 `--target-dir` 时,会先保存压缩文件,然后使用 `xbstream -x` 解包,最后使用 `xtrabackup --decompress` 解压
+  - 使用 `--output -` 时,会警告并输出原始压缩流
+
+- **未压缩备份**:
+  - 不指定 `--compress` 时,直接保存或解包
+  - 使用 `--target-dir` 时,直接使用 `xbstream -x` 解包
+
---

## 日志与对象命名

-- 所有备份日志自动保存在 `logs/` 目录,仅保留最近 10 个日志文件。
+### 统一日志系统
+
+工具采用统一日志系统,将所有关键操作的日志记录到单个日志文件中:
+
+- **日志文件命名**:默认自动生成 `backup-helper-{timestamp}.log`(如 `backup-helper-20251106105903.log`),可通过 `--log-file` 或配置文件中的 `logFileName` 自定义文件名(支持相对路径和绝对路径)
+- **日志存储位置**:默认在 `/var/log/mysql-backup-helper`,可通过配置文件中的 `logDir` 字段指定(支持相对路径和绝对路径)
+- **日志内容**:统一记录所有操作步骤
+  - **[BACKUP]**:xtrabackup 备份操作
+  - **[PREPARE]**:xtrabackup prepare 操作
+  - **[TCP]**:TCP 流传输(发送/接收)
+  - **[OSS]**:OSS 上传操作
+  - **[XBSTREAM]**:xbstream 解包操作
+  - **[DECOMPRESS]**:解压缩操作(zstd/qpress)
+  - **[EXTRACT]**:提取操作
+  - **[SYSTEM]**:系统级别的日志
+
+- **日志格式**:每条日志包含时间戳和模块前缀,格式为 `[YYYY-MM-DD HH:MM:SS] [MODULE] 消息内容`
+- **日志清理**:自动清理旧日志,仅保留最近 10 个日志文件
+- **错误处理**:
+  - 操作完成或失败时,会在控制台显示日志文件位置
+  - 失败时自动提取错误摘要并显示在控制台
+  - 所有模块支持 AI 
诊断(需配置 Qwen API Key) + - **传输中断检测**:自动检测 TCP 连接中断、进程异常终止等情况,记录到日志文件并中止流程,避免处理不完整的数据 + +示例日志内容: +``` +[2025-11-06 10:59:03] [SYSTEM] === MySQL Backup Helper Log Started === +[2025-11-06 10:59:03] [SYSTEM] Timestamp: 2025-11-06 10:59:03 +[2025-11-06 10:59:03] [BACKUP] Starting backup operation +[2025-11-06 10:59:03] [BACKUP] Command: xtrabackup --backup --stream=xbstream ... +[2025-11-06 10:59:03] [TCP] Listening on 192.168.1.100:9999 +[2025-11-06 10:59:03] [TCP] Client connected +[2025-11-06 10:59:03] [TCP] Transfer started +``` + +### OSS 对象命名 + - OSS 对象名自动加时间戳,如 `backup/your-backup_202507181648.xb.zst`,便于归档和查找。 ## 进度跟踪 工具会在备份上传过程中实时显示进度信息: -- **实时进度**:显示已上传/已下载大小、总大小、百分比、传输速度和持续时间 +- **实时进度**:显示已上传/已下载大小、总大小、百分比(未压缩时)、传输速度和持续时间 + - 启用压缩时,不显示百分比(因为压缩后的实际大小与原始大小不一致) + - 未压缩时,显示完整进度:`Progress: 100 MB / 500 MB (20.0%) - 50 MB/s - Duration: 2s` + - 压缩时,仅显示:`Progress: 100 MB - 50 MB/s - Duration: 2s` - **最终统计**:显示总上传/总下载大小、持续时间、平均速度 - **大小计算**: - 如果提供了 `--estimated-size`,直接使用该值(支持单位:KB, MB, GB, TB) @@ -247,9 +470,9 @@ cat backup.xb | ./backup-helper --config config.json --existed-backup - --mode=s - 支持单位:`KB/s`, `MB/s`, `GB/s`, `TB/s`(如 `100MB/s`, `1GB/s`) - 也可以直接使用字节/秒(如 `104857600` 表示 100 MB/s) - 使用 `-1` 表示完全禁用限速(不限速上传) -- **配置文件**:可以在配置文件中设置 `ioLimit` 字段,或使用 `traffic` 字段(单位:字节/秒) +- **配置文件**:可以在配置文件中设置 `ioLimit` 字段(单位:字节/秒),支持使用 `--io-limit` 命令行参数覆盖 -示例输出: +示例输出(未压缩): ``` [backup-helper] IO rate limit set to: 100.0 MB/s @@ -261,6 +484,18 @@ Progress: 1.3 GB / 1.5 GB (86.7%) - 99.2 MB/s - Duration: 13.1s Average speed: 102.4 MB/s ``` +示例输出(启用压缩): +``` +[backup-helper] IO rate limit set to: 100.0 MB/s + +Progress: 500 MB - 95.2 MB/s - Duration: 5.2s +Progress: 800 MB - 96.1 MB/s - Duration: 8.3s +[backup-helper] Upload completed! + Total uploaded: 1.0 GB + Duration: 10.5s + Average speed: 97.5 MB/s +``` + --- ## 多语言支持 @@ -275,7 +510,9 @@ Progress: 1.3 GB / 1.5 GB (86.7%) - 99.2 MB/s - Duration: 13.1s - **zstd 未安装**:请先安装 zstd 并确保在 PATH 中。 - **OSS 上传失败**:请检查配置文件中的 OSS 相关参数。 - **MySQL 连接失败**:请检查数据库主机、端口、用户名、密码。 -- **日志堆积**:程序会自动清理 logs 目录,仅保留最近 10 个日志。 +- **日志堆积**:程序会自动清理日志目录,仅保留最近 10 个日志文件。 +- **日志位置**:操作完成或失败时,会在控制台显示日志文件完整路径,便于排查问题。 +- **传输中断**:如果传输过程中连接中断,系统会自动检测并记录错误日志,中止流程。请检查日志文件了解详细错误信息。 --- diff --git a/example-config.json b/example-config.json index f4d7c91..a618d80 100644 --- a/example-config.json +++ b/example-config.json @@ -7,20 +7,30 @@ "objectName": "backup/mysql_backup", "size": 104857600, "buffer": 10, - "traffic": 209715200, "mysqlHost": "127.0.0.1", "mysqlPort": 3306, "mysqlUser": "root", "mysqlPassword": "password", - "compress": true, "compressType": "zstd", - "mode": "oss", + "mode": "stream", "streamPort": 9999, + "streamHost": "", + "mysqlVersion": { + "major": 5, + "minor": 7, + "patch": 0 + }, "qwenAPIKey": "your-qwen-api-key", - "enableHandshake": true, + "enableHandshake": false, "streamKey": "your-secret-key", - "existedBackup": "your-existed-backup-file-path", + "existedBackup": "", + "logDir": "/var/log/mysql-backup-helper", + "logFileName": "", "estimatedSize": 0, "ioLimit": 0, - "downloadOutput": "" + "downloadOutput": "", + "remoteOutput": "", + "parallel": 4, + "useMemory": "1G", + "xtrabackupPath": "" } diff --git a/main.go b/main.go index 82ef6e2..20ed0cd 100644 --- a/main.go +++ b/main.go @@ -2,10 +2,12 @@ package main import ( "backup-helper/utils" + "database/sql" "flag" "fmt" "io" "os" + "path/filepath" "strings" "time" @@ -15,15 +17,103 @@ import ( "golang.org/x/text/language" ) +// contains checks if a string contains a substring (case-insensitive) 
+func contains(s, substr string) bool { + return strings.Contains(strings.ToLower(s), strings.ToLower(substr)) +} + +// isDirEmpty checks if a directory is empty +func isDirEmpty(dir string) (bool, error) { + info, err := os.Stat(dir) + if err != nil { + if os.IsNotExist(err) { + return true, nil // Directory doesn't exist, consider it empty + } + return false, err + } + + if !info.IsDir() { + return false, fmt.Errorf("%s is not a directory", dir) + } + + entries, err := os.ReadDir(dir) + if err != nil { + return false, err + } + + return len(entries) == 0, nil +} + +// promptOverwrite asks user if they want to overwrite existing files in targetDir +// If autoYes is true, automatically returns true and shows a warning +func promptOverwrite(targetDir string, autoYes bool) bool { + i18n.Printf("Warning: Target directory '%s' already exists and is not empty.\n", targetDir) + i18n.Printf("Extracting to this directory may overwrite existing files.\n") + + if autoYes { + i18n.Printf("Auto-confirming overwrite (--yes/-y flag is set)...\n") + return true + } + + i18n.Printf("Do you want to continue? (y/n): ") + + var input string + fmt.Scanln(&input) + input = strings.TrimSpace(strings.ToLower(input)) + + return input == "y" || input == "yes" +} + +// promptAIDiagnosis asks user if they want to use AI diagnosis +// If autoYes is true, automatically returns true and shows a warning +func promptAIDiagnosis(autoYes bool) bool { + if autoYes { + i18n.Printf("Auto-confirming AI diagnosis (--yes/-y flag is set)...\n") + return true + } + + var input string + i18n.Printf("Would you like to use AI diagnosis? (y/n): ") + fmt.Scanln(&input) + input = strings.TrimSpace(strings.ToLower(input)) + return input == "y" || input == "yes" +} + +// clearDirectory removes all files and subdirectories in the given directory +func clearDirectory(dir string) error { + entries, err := os.ReadDir(dir) + if err != nil { + return err + } + + for _, entry := range entries { + path := filepath.Join(dir, entry.Name()) + if entry.IsDir() { + if err := os.RemoveAll(path); err != nil { + return err + } + } else { + if err := os.Remove(path); err != nil { + return err + } + } + } + + return nil +} + func main() { utils.InitI18nAuto() var doBackup bool var doDownload bool + var doPrepare bool + var doCheck bool var configPath string var host, user, password string var port int var streamPort int + var streamHost string var mode string var compressType string var langFlag string @@ -36,13 +126,29 @@ func main() { var estimatedSizeStr string var estimatedSize int64 var ioLimitStr string - var ioLimit int64 + var useSSH bool + var remoteOutput string + var targetDir string + var parallel int + var useMemory string + var autoYes bool + var xtrabackupPath string + var defaultsFile string + var logFileName string flag.BoolVar(&doBackup, "backup", false, "Run xtrabackup and upload to OSS") + flag.BoolVar(&autoYes, "y", false, "Automatically answer 'yes' to all prompts (non-interactive mode)") + flag.BoolVar(&autoYes, "yes", false, "Automatically answer 'yes' to all prompts (non-interactive mode)") flag.BoolVar(&doDownload, "download", false, "Download backup from TCP stream (listen on port)") + flag.BoolVar(&doPrepare, "prepare", false, "Prepare backup for restore (xtrabackup --prepare)") + flag.BoolVar(&doCheck, "check", false, "Perform pre-flight validation checks (dependencies, MySQL compatibility, system resources, parameter recommendations)") flag.StringVar(&downloadOutput, "output", "", "Output file path for download mode (use '-' 
for stdout, default: backup_YYYYMMDDHHMMSS.xb)") + flag.StringVar(&targetDir, "target-dir", "", "Directory for extraction (download mode) or backup directory (prepare mode)") flag.StringVar(&estimatedSizeStr, "estimated-size", "", "Estimated backup size with unit (e.g., '100MB', '1GB', '500KB') or bytes (for progress tracking)") flag.StringVar(&ioLimitStr, "io-limit", "", "IO bandwidth limit with unit (e.g., '100MB/s', '1GB/s', '500KB/s') or bytes per second. Use -1 for unlimited speed") + flag.StringVar(&useMemory, "use-memory", "", "Memory to use for prepare operation (e.g., '1G', '512M'). Default: 1G") + flag.StringVar(&xtrabackupPath, "xtrabackup-path", "", "Path to xtrabackup binary or directory containing xtrabackup/xbstream (overrides config and environment variable)") + flag.StringVar(&defaultsFile, "defaults-file", "", "Path to MySQL configuration file (my.cnf). If not specified, --defaults-file will not be passed to xtrabackup") flag.StringVar(&existedBackup, "existed-backup", "", "Path to existing xtrabackup backup file to upload (use '-' for stdin)") flag.BoolVar(&showVersion, "version", false, "Show version information") flag.BoolVar(&showVersion, "v", false, "Show version information (shorthand)") @@ -51,13 +157,20 @@ func main() { flag.IntVar(&port, "port", 0, "Port number to use for connection") flag.StringVar(&user, "user", "", "User for login") flag.StringVar(&password, "password", "", "Password to use when connecting to server. If password is not given it's asked from the tty.") - flag.IntVar(&streamPort, "stream-port", 0, "Local TCP port for streaming (0 = auto-find available port)") - flag.StringVar(&mode, "mode", "oss", "Backup mode: oss (upload to OSS) or stream (push to TCP port)") - flag.StringVar(&compressType, "compress-type", "", "Compress type: qp(qpress)/zstd, priority is higher than config file") + flag.IntVar(&streamPort, "stream-port", 0, "Local TCP port for streaming (0 = auto-find available port), or remote port when --stream-host is specified") + flag.StringVar(&streamHost, "stream-host", "", "Remote host IP for pushing data (e.g., '192.168.1.100'). When specified, actively connects to remote instead of listening locally") + flag.StringVar(&mode, "mode", "stream", "Backup mode: oss (upload to OSS) or stream (push to TCP port)") + flag.StringVar(&compressType, "compress", "__NOT_SET__", "Compression: qp(qpress)/zstd/no, or no value (default: qp). Priority is higher than config file") flag.StringVar(&langFlag, "lang", "", "Language: zh (Chinese) or en (English), auto-detect if unset") flag.StringVar(&aiDiagnoseFlag, "ai-diagnose", "", "AI diagnosis on backup failure: on/off. If not set, prompt interactively.") flag.BoolVar(&enableHandshake, "enable-handshake", false, "Enable handshake for TCP streaming (default: false, can be set in config)") flag.StringVar(&streamKey, "stream-key", "", "Handshake key for TCP streaming (default: empty, can be set in config)") + var timeout int + flag.IntVar(&timeout, "timeout", 0, "TCP connection timeout in seconds for listening (default: 60, max: 3600)") + flag.BoolVar(&useSSH, "ssh", false, "Use SSH to start receiver on remote host (requires --stream-host)") + flag.StringVar(&remoteOutput, "remote-output", "", "Remote output path when using SSH mode (default: auto-generated)") + flag.IntVar(¶llel, "parallel", 0, "Number of parallel threads for xtrabackup (default: 4)") + flag.StringVar(&logFileName, "log-file", "", "Custom log file name (relative to logDir or absolute path). 
If not specified, auto-generates backup-helper-{timestamp}.log") flag.Parse() @@ -105,13 +218,54 @@ func main() { if password == "" { password = cfg.MysqlPassword } - if compressType == "" && cfg.CompressType != "" { - compressType = cfg.CompressType + // Handle --compress flag + // If compressType is "__NOT_SET__", flag was not passed + // If compressType is "" and flag was passed, user passed --compress without value (default to qp) + // If compressType has a value, use it + if compressType == "__NOT_SET__" { + // Flag was not passed, use config or empty + if cfg.CompressType != "" { + compressType = cfg.CompressType + } else { + compressType = "" + } + } else { + // Flag was passed + if compressType == "" { + // --compress was passed but empty value (--compress= or --compress ""), default to qp + compressType = "qp" + } + // Otherwise use the provided value (zstd, qp, no, etc.) + } + // Normalize: "no" means no compression + if compressType == "no" { + compressType = "" } if existedBackup == "" && cfg.ExistedBackup != "" { existedBackup = cfg.ExistedBackup } + // Handle --xtrabackup-path flag (command-line flag overrides config) + if xtrabackupPath != "" { + cfg.XtrabackupPath = xtrabackupPath + } else if cfg.XtrabackupPath == "" { + // If not set in flag or config, check environment variable + // (ResolveXtrabackupPath will handle this, but we can also set it here for consistency) + if envPath := os.Getenv("XTRABACKUP_PATH"); envPath != "" { + cfg.XtrabackupPath = envPath + } + } + + // Handle --defaults-file flag (command-line flag overrides config) + if defaultsFile != "" { + cfg.DefaultsFile = defaultsFile + } + + // Handle --log-file flag (command-line flag overrides config) + if logFileName != "" { + cfg.LogFileName = logFileName + } + // Parse estimatedSize from command line or config if estimatedSizeStr != "" { parsedSize, err := utils.ParseSize(estimatedSizeStr) @@ -131,21 +285,425 @@ func main() { i18n.Printf("Error parsing --io-limit '%s': %v\n", ioLimitStr, err) os.Exit(1) } - ioLimit = parsedLimit - } else if ioLimit == 0 && cfg.IOLimit > 0 { - ioLimit = cfg.IOLimit + cfg.IOLimit = parsedLimit + } + // cfg.IOLimit now contains: -1 (unlimited), 0 (use default), or >0 (specified value) + + // Parse parallel from command line or config + if parallel > 0 { + cfg.Parallel = parallel + } else if parallel == 0 && cfg.Parallel == 0 { + // Use default (4) if not specified in command line or config + cfg.Parallel = 4 + } + + // Parse useMemory from command line or config + if useMemory != "" { + cfg.UseMemory = useMemory + } else if cfg.UseMemory == "" { + // Use default (1G) if not specified in command line or config + cfg.UseMemory = "1G" + } + + // Parse timeout from command line or config + if timeout > 0 { + cfg.Timeout = timeout + } else if cfg.Timeout == 0 { + // Use default (60) if not specified in command line or config + cfg.Timeout = 60 + } + // Enforce maximum timeout: 3600 seconds (1 hour) + if cfg.Timeout > 3600 { + cfg.Timeout = 3600 + } + + // 4. 
Handle --check mode + if doCheck { + outputHeader() + + // Determine effective compression type + effectiveCompressType := compressType + if effectiveCompressType == "__NOT_SET__" { + effectiveCompressType = cfg.CompressType + } + if effectiveCompressType == "no" { + effectiveCompressType = "" + } + + // Parse stream-host and stream-port for connectivity checks + var checkStreamHost string + var checkStreamPort int + if streamHost != "" { + checkStreamHost = streamHost + } else if cfg.StreamHost != "" { + checkStreamHost = cfg.StreamHost + } + if streamPort > 0 { + checkStreamPort = streamPort + } else if !isFlagPassed("stream-port") && cfg.StreamPort > 0 { + checkStreamPort = cfg.StreamPort + } + + // Get MySQL connection if available + var db *sql.DB + if host != "" && user != "" { + if password == "" { + i18n.Printf("Please input mysql-server password (optional, for MySQL compatibility checks): ") + pwd, _ := term.ReadPassword(0) + i18n.Printf("\n") + password = string(pwd) + } + if password != "" { + i18n.Printf("Connecting to MySQL server for compatibility checks...\n") + db = utils.GetConnection(host, port, user, password) + defer db.Close() + } + } + + // Check if --check is combined with other modes + if doBackup { + // --check --backup: only check backup mode + i18n.Printf("[backup-helper] Running pre-flight checks for BACKUP mode...\n\n") + results := utils.CheckForBackupMode(cfg, effectiveCompressType, db, checkStreamHost, checkStreamPort) + utils.PrintCheckResults(i18n.Sprintf("Backup Mode Checks"), results) + + hasCriticalError := false + for _, result := range results { + if result.Status == "ERROR" { + hasCriticalError = true + break + } + } + + i18n.Printf("\n=== %s ===\n", i18n.Sprintf("Check Summary")) + if hasCriticalError { + i18n.Printf("[ERROR] Critical errors found. Please fix them before proceeding with backup.\n") + os.Exit(1) + } else { + i18n.Printf("[OK] Pre-flight checks completed. Backup mode is ready.\n") + } + return + } else if doDownload { + // --check --download: only check download mode + i18n.Printf("[backup-helper] Running pre-flight checks for DOWNLOAD mode...\n\n") + results := utils.CheckForDownloadMode(cfg, effectiveCompressType, targetDir, checkStreamHost, checkStreamPort) + utils.PrintCheckResults(i18n.Sprintf("Download Mode Checks"), results) + + hasCriticalError := false + for _, result := range results { + if result.Status == "ERROR" { + hasCriticalError = true + break + } + } + + i18n.Printf("\n=== %s ===\n", i18n.Sprintf("Check Summary")) + if hasCriticalError { + i18n.Printf("[ERROR] Critical errors found. Please fix them before proceeding with download.\n") + os.Exit(1) + } else { + i18n.Printf("[OK] Pre-flight checks completed. Download mode is ready.\n") + } + return + } else if doPrepare { + // --check --prepare: only check prepare mode + i18n.Printf("[backup-helper] Running pre-flight checks for PREPARE mode...\n\n") + results := utils.CheckForPrepareMode(cfg, targetDir, db) + utils.PrintCheckResults(i18n.Sprintf("Prepare Mode Checks"), results) + + hasCriticalError := false + for _, result := range results { + if result.Status == "ERROR" { + hasCriticalError = true + break + } + } + + i18n.Printf("\n=== %s ===\n", i18n.Sprintf("Check Summary")) + if hasCriticalError { + i18n.Printf("[ERROR] Critical errors found. Please fix them before proceeding with prepare.\n") + os.Exit(1) + } else { + i18n.Printf("[OK] Pre-flight checks completed. 
Prepare mode is ready.\n") + } + return + } else { + // Only --check: check all modes and show what would happen + i18n.Printf("[backup-helper] Running comprehensive pre-flight checks for all modes...\n\n") + + // Check system resources (common to all modes) + resources := utils.CheckSystemResources() + systemResults := []utils.CheckResult{ + { + Status: "INFO", + Item: "CPU cores", + Value: fmt.Sprintf("%d", resources.CPUCores), + Message: "", + }, + } + if resources.TotalMemory > 0 { + systemResults = append(systemResults, utils.CheckResult{ + Status: "INFO", + Item: "Total memory", + Value: formatBytes(resources.TotalMemory), + Message: "", + }) + } + if resources.AvailableMemory > 0 { + systemResults = append(systemResults, utils.CheckResult{ + Status: "INFO", + Item: "Available memory", + Value: formatBytes(resources.AvailableMemory), + Message: "", + }) + } + if resources.NetworkInfo != "" { + systemResults = append(systemResults, utils.CheckResult{ + Status: "INFO", + Item: "Network interfaces", + Value: resources.NetworkInfo, + Message: "", + }) + } + utils.PrintCheckResults(i18n.Sprintf("System Resources"), systemResults) + + // Check BACKUP mode + i18n.Printf("\n--- Checking BACKUP mode ---\n") + // Note: In standalone --check mode, we don't check TCP connectivity + // TCP connectivity is only checked when --check is combined with --backup or --download + backupResults := utils.CheckForBackupMode(cfg, effectiveCompressType, db, "", 0) + utils.PrintCheckResults(i18n.Sprintf("Backup Mode"), backupResults) + + backupHasError := false + for _, result := range backupResults { + if result.Status == "ERROR" { + backupHasError = true + break + } + } + if backupHasError { + i18n.Printf("[WARNING] BACKUP mode has critical errors and cannot proceed.\n") + } else { + // Calculate MySQL data size for recommendations + var mysqlSize int64 + if db != nil { + datadir, err := utils.GetDatadirFromMySQL(db) + if err == nil { + mysqlSize, _ = utils.CalculateBackupSize(datadir) + } + } + paramResults := utils.RecommendParameters(resources, mysqlSize, effectiveCompressType, cfg) + utils.PrintCheckResults(i18n.Sprintf("Recommended Parameters for Backup"), paramResults) + i18n.Printf("[OK] BACKUP mode is ready.\n") + } + + // Check DOWNLOAD mode + i18n.Printf("\n--- Checking DOWNLOAD mode ---\n") + // Note: In standalone --check mode, we don't check TCP connectivity + // TCP connectivity is only checked when --check is combined with --backup or --download + downloadResults := utils.CheckForDownloadMode(cfg, effectiveCompressType, targetDir, "", 0) + utils.PrintCheckResults(i18n.Sprintf("Download Mode"), downloadResults) + + downloadHasError := false + for _, result := range downloadResults { + if result.Status == "ERROR" { + downloadHasError = true + break + } + } + if downloadHasError { + i18n.Printf("[WARNING] DOWNLOAD mode has critical errors and cannot proceed.\n") + } else { + i18n.Printf("[OK] DOWNLOAD mode is ready.\n") + } + + // Check PREPARE mode + i18n.Printf("\n--- Checking PREPARE mode ---\n") + prepareResults := utils.CheckForPrepareMode(cfg, targetDir, db) + utils.PrintCheckResults(i18n.Sprintf("Prepare Mode"), prepareResults) + + prepareHasError := false + for _, result := range prepareResults { + if result.Status == "ERROR" { + prepareHasError = true + break + } + } + if prepareHasError { + i18n.Printf("[WARNING] PREPARE mode has critical errors and cannot proceed.\n") + } else { + i18n.Printf("[OK] PREPARE mode is ready.\n") + } + + // Summary + i18n.Printf("\n=== %s ===\n", 
i18n.Sprintf("Check Summary")) + i18n.Printf("BACKUP mode: %s\n", map[bool]string{true: "[ERROR] Cannot proceed", false: "[OK] Ready"}[backupHasError]) + i18n.Printf("DOWNLOAD mode: %s\n", map[bool]string{true: "[ERROR] Cannot proceed", false: "[OK] Ready"}[downloadHasError]) + i18n.Printf("PREPARE mode: %s\n", map[bool]string{true: "[ERROR] Cannot proceed", false: "[OK] Ready"}[prepareHasError]) + i18n.Printf("\nTo run a specific mode, use: --backup, --download, or --prepare\n") + i18n.Printf("To check a specific mode only, use: --check --backup, --check --download, or --check --prepare\n") + } + return } - // Update traffic config based on ioLimit - if ioLimit == -1 { - cfg.Traffic = 0 // 0 means unlimited - } else if ioLimit > 0 { - cfg.Traffic = ioLimit + // 5. Handle --prepare mode + if doPrepare { + // Pre-check for prepare mode + var db *sql.DB + if host != "" && user != "" && password != "" { + db = utils.GetConnection(host, port, user, password) + defer db.Close() + } else if host != "" && user != "" { + // Password might be prompted later, but for now we can check without it + } + + prepareResults := utils.CheckForPrepareMode(cfg, targetDir, db) + hasCriticalError := false + for _, result := range prepareResults { + if result.Status == "ERROR" { + hasCriticalError = true + i18n.Printf("[ERROR] %s: %s - %s\n", result.Item, result.Value, result.Message) + } + } + if hasCriticalError { + i18n.Printf("\n[ERROR] Pre-flight checks failed. Please fix the errors above before proceeding.\n") + os.Exit(1) + } + + if targetDir == "" { + i18n.Printf("Error: --target-dir is required for --prepare mode\n") + os.Exit(1) + } + + // Check if target directory exists + if _, err := os.Stat(targetDir); os.IsNotExist(err) { + i18n.Printf("Error: Backup directory does not exist: %s\n", targetDir) + os.Exit(1) + } + + // Create log context + logCtx, err := utils.NewLogContext(cfg.LogDir, cfg.LogFileName) + if err != nil { + i18n.Printf("Failed to create log context: %v\n", err) + os.Exit(1) + } + defer logCtx.Close() + + outputHeader() + i18n.Printf("[backup-helper] Preparing backup in directory: %s\n", targetDir) + i18n.Printf("[backup-helper] Parallel threads: %d\n", cfg.Parallel) + i18n.Printf("[backup-helper] Use memory: %s\n", cfg.UseMemory) + logCtx.WriteLog("PREPARE", "Starting prepare operation") + logCtx.WriteLog("PREPARE", "Target directory: %s", targetDir) + + // Try to get MySQL connection for defaults-file (optional, can be nil) + // db may already be set from pre-check above + if db == nil && host != "" && user != "" { + if password == "" { + i18n.Printf("Please input mysql-server password (optional, for defaults-file): ") + pwd, _ := term.ReadPassword(0) + i18n.Printf("\n") + password = string(pwd) + } + if password != "" { + db = utils.GetConnection(host, port, user, password) + defer db.Close() + } + } + + cmd, err := utils.RunXtrabackupPrepare(cfg, targetDir, db, logCtx) + if err != nil { + logCtx.WriteLog("PREPARE", "Failed to start prepare: %v", err) + i18n.Printf("Failed to start prepare: %v\n", err) + os.Exit(1) + } + + // Wait for prepare to complete + err = cmd.Wait() + if err != nil { + logCtx.WriteLog("PREPARE", "Prepare failed: %v", err) + // Read log content for error extraction + logContent, err2 := os.ReadFile(logCtx.GetFileName()) + if err2 == nil { + errorSummary := utils.ExtractErrorSummary("PREPARE", string(logContent)) + if errorSummary != "" { + i18n.Printf("Prepare failed. 
Error summary:\n%s\n", errorSummary)
+				} else {
+					i18n.Printf("Prepare failed: %v\n", err)
+				}
+			} else {
+				i18n.Printf("Prepare failed: %v\n", err)
+			}
+			i18n.Printf("Log file: %s\n", logCtx.GetFileName())
+
+			// Prompt for AI diagnosis
+			switch aiDiagnoseFlag {
+			case "on":
+				// When --ai-diagnose=on, ask user (unless -y is set)
+				if promptAIDiagnosis(autoYes) {
+					if cfg.QwenAPIKey == "" {
+						i18n.Printf("Qwen API Key is required for AI diagnosis. Please set it in config.\n")
+						os.Exit(1)
+					}
+					logContent, _ := os.ReadFile(logCtx.GetFileName())
+					aiSuggestion, err := utils.DiagnoseWithAliQwen(cfg, "PREPARE", string(logContent))
+					if err != nil {
+						i18n.Printf("AI diagnosis failed: %v\n", err)
+					} else {
+						fmt.Print(color.YellowString(i18n.Sprintf("AI diagnosis suggestion:\n")))
+						fmt.Println(color.YellowString(aiSuggestion))
+					}
+				}
+			case "off":
+				// do nothing, skip ai diagnose
+			default:
+				// Default: off (skip AI diagnosis to avoid interrupting user workflow)
+				// do nothing
+			}
+			os.Exit(1)
+		}
+
+		logCtx.WriteLog("PREPARE", "Prepare completed successfully")
+		logCtx.MarkSuccess()
+		i18n.Printf("[backup-helper] Prepare completed successfully!\n")
+		i18n.Printf("[backup-helper] Backup is ready for restore in: %s\n", targetDir)
+		i18n.Printf("[backup-helper] Log file: %s\n", logCtx.GetFileName())
+		return
+	}

-	// If ioLimit is 0, cfg.Traffic will use default from SetDefaults()
-	// 4. Handle --download mode
+	// 6. Handle --download mode
 	if doDownload {
+		// Pre-check for download mode
+		downloadCompressType := compressType
+		if downloadCompressType == "__NOT_SET__" {
+			downloadCompressType = cfg.CompressType
+		}
+		if downloadCompressType == "no" {
+			downloadCompressType = ""
+		}
+
+		// Note: In non-check mode, we don't check TCP connectivity to avoid blocking
+		downloadResults := utils.CheckForDownloadMode(cfg, downloadCompressType, targetDir, "", 0)
+		hasCriticalError := false
+		for _, result := range downloadResults {
+			if result.Status == "ERROR" {
+				hasCriticalError = true
+				i18n.Printf("[ERROR] %s: %s - %s\n", result.Item, result.Value, result.Message)
+			}
+		}
+		if hasCriticalError {
+			i18n.Printf("\n[ERROR] Pre-flight checks failed. 
Please fix the errors above before proceeding.\n") + os.Exit(1) + } + + // Create log context + logCtx, err := utils.NewLogContext(cfg.LogDir, cfg.LogFileName) + if err != nil { + i18n.Printf("Failed to create log context: %v\n", err) + os.Exit(1) + } + defer logCtx.Close() + // Display header (only if not outputting to stdout) if downloadOutput != "-" { outputHeader() @@ -153,6 +711,12 @@ func main() { // When outputting to stdout, output header to stderr outputHeaderToStderr() } + logCtx.WriteLog("DOWNLOAD", "Starting download mode") + + // Parse stream-host from command line or config + if streamHost == "" && cfg.StreamHost != "" { + streamHost = cfg.StreamHost + } // Parse stream-port from command line or config if streamPort == 0 && !isFlagPassed("stream-port") && cfg.StreamPort > 0 { @@ -167,13 +731,15 @@ func main() { streamKey = cfg.StreamKey } + // downloadCompressType is already set in pre-check above + // Determine output file path outputPath := downloadOutput if outputPath == "" && cfg.DownloadOutput != "" { outputPath = cfg.DownloadOutput } - if outputPath == "" { - // Default: backup_YYYYMMDDHHMMSS.xb + if outputPath == "" && targetDir == "" { + // Default: backup_YYYYMMDDHHMMSS.xb (only if not extracting) timestamp := time.Now().Format("20060102150405") outputPath = fmt.Sprintf("backup_%s.xb", timestamp) } @@ -181,82 +747,272 @@ func main() { // Display IO limit if outputPath == "-" { // Output to stderr when streaming to stdout - if ioLimit == -1 { + if cfg.IOLimit == -1 { i18n.Fprintf(os.Stderr, "[backup-helper] Rate limiting disabled (unlimited speed)\n") - } else if ioLimit > 0 { - i18n.Fprintf(os.Stderr, "[backup-helper] IO rate limit set to: %s/s\n", formatBytes(ioLimit)) - } else if cfg.Traffic > 0 { - i18n.Fprintf(os.Stderr, "[backup-helper] IO rate limit set to: %s/s (default)\n", formatBytes(cfg.Traffic)) + } else if cfg.IOLimit > 0 { + i18n.Fprintf(os.Stderr, "[backup-helper] IO rate limit set to: %s/s\n", formatBytes(cfg.IOLimit)) + } else { + i18n.Fprintf(os.Stderr, "[backup-helper] IO rate limit set to: %s/s (default)\n", formatBytes(cfg.GetRateLimit())) } } else { // Output to stdout when saving to file - if ioLimit == -1 { + if cfg.IOLimit == -1 { i18n.Printf("[backup-helper] Rate limiting disabled (unlimited speed)\n") - } else if ioLimit > 0 { - i18n.Printf("[backup-helper] IO rate limit set to: %s/s\n", formatBytes(ioLimit)) - } else if cfg.Traffic > 0 { - i18n.Printf("[backup-helper] IO rate limit set to: %s/s (default)\n", formatBytes(cfg.Traffic)) + } else if cfg.IOLimit > 0 { + i18n.Printf("[backup-helper] IO rate limit set to: %s/s\n", formatBytes(cfg.IOLimit)) + } else { + i18n.Printf("[backup-helper] IO rate limit set to: %s/s (default)\n", formatBytes(cfg.GetRateLimit())) } } - // Start TCP receiver - receiver, tracker, closer, actualPort, localIP, err := utils.StartStreamReceiver(streamPort, enableHandshake, streamKey, estimatedSize) - _ = actualPort // Port info already displayed in StartStreamReceiver - _ = localIP // IP info already displayed in StartStreamReceiver - if err != nil { - i18n.Fprintf(os.Stderr, "Stream receiver error: %v\n", err) - os.Exit(1) + // Start TCP receiver or client based on stream-host + isCompressed := downloadCompressType != "" + var receiver io.ReadCloser + var tracker *utils.ProgressTracker + var closer func() + + if streamHost != "" && streamPort > 0 { + // Active mode: connect to remote server to pull data + logCtx.WriteLog("DOWNLOAD", "Connecting to remote server %s:%d to pull data", streamHost, streamPort) + if 
outputPath == "-" { + i18n.Fprintf(os.Stderr, "[backup-helper] Connecting to %s:%d...\n", streamHost, streamPort) + } else { + i18n.Printf("[backup-helper] Connecting to %s:%d...\n", streamHost, streamPort) + } + receiver, tracker, closer, _, err = utils.StartStreamClientReader(streamHost, streamPort, enableHandshake, streamKey, estimatedSize, isCompressed, logCtx) + if err != nil { + logCtx.WriteLog("DOWNLOAD", "Stream client error: %v", err) + if outputPath == "-" { + i18n.Fprintf(os.Stderr, "Stream client error: %v\n", err) + } else { + i18n.Printf("Stream client error: %v\n", err) + } + os.Exit(1) + } + } else { + // Passive mode: listen locally and wait for connection + logCtx.WriteLog("DOWNLOAD", "Starting TCP receiver on port %d", streamPort) + var actualPort int + var localIP string + receiver, tracker, closer, actualPort, localIP, err = utils.StartStreamReceiver(streamPort, enableHandshake, streamKey, estimatedSize, isCompressed, cfg.Timeout, logCtx) + _ = actualPort // Port info already displayed in StartStreamReceiver + _ = localIP // IP info already displayed in StartStreamReceiver + if err != nil { + logCtx.WriteLog("DOWNLOAD", "Stream receiver error: %v", err) + if outputPath == "-" { + i18n.Fprintf(os.Stderr, "Stream receiver error: %v\n", err) + } else { + i18n.Printf("Stream receiver error: %v\n", err) + } + os.Exit(1) + } } defer closer() // This will call tracker.Complete() internally - // Determine output destination - if outputPath == "-" { + // Apply rate limiting if configured + var reader io.Reader = receiver + rateLimit := cfg.GetRateLimit() + if rateLimit > 0 { + rateLimitedReader := utils.NewRateLimitedReader(receiver, rateLimit) + reader = rateLimitedReader + } + + // Determine output destination and handle extraction + if targetDir != "" { + // Extraction mode: decompress (if needed) and extract + if outputPath == "-" { + i18n.Printf("Error: --target-dir cannot be used with --output -\n") + os.Exit(1) + } + + // Check if target directory exists and is not empty + if info, err := os.Stat(targetDir); err == nil { + if info.IsDir() { + empty, err := isDirEmpty(targetDir) + if err != nil { + logCtx.WriteLog("DOWNLOAD", "Failed to check target directory: %v", err) + i18n.Printf("Error: Failed to check target directory: %v\n", err) + os.Exit(1) + } + if !empty { + // Directory exists and is not empty, ask user for confirmation + if !promptOverwrite(targetDir, autoYes) { + logCtx.WriteLog("DOWNLOAD", "User cancelled extraction to non-empty directory: %s", targetDir) + i18n.Printf("Extraction cancelled.\n") + os.Exit(0) + } + logCtx.WriteLog("DOWNLOAD", "User confirmed overwrite for directory: %s", targetDir) + i18n.Printf("Clearing target directory...\n") + logCtx.WriteLog("DOWNLOAD", "Clearing target directory: %s", targetDir) + if err := clearDirectory(targetDir); err != nil { + logCtx.WriteLog("DOWNLOAD", "Failed to clear target directory: %v", err) + i18n.Printf("Error: Failed to clear target directory: %v\n", err) + os.Exit(1) + } + logCtx.WriteLog("DOWNLOAD", "Target directory cleared successfully") + i18n.Printf("Target directory cleared. 
Proceeding with extraction...\n") + } + } else { + logCtx.WriteLog("DOWNLOAD", "Target path exists but is not a directory: %s", targetDir) + i18n.Printf("Error: Target path '%s' exists but is not a directory\n", targetDir) + os.Exit(1) + } + } + // If directory doesn't exist, it will be created by MkdirAll in extract functions + + // Set default output path if not specified (for qpress temp file) + if outputPath == "" && downloadCompressType == "qp" { + timestamp := time.Now().Format("20060102150405") + outputPath = fmt.Sprintf("backup_%s.xb", timestamp) + } + + if downloadCompressType != "" { + i18n.Printf("[backup-helper] Receiving backup data (compression: %s)...\n", downloadCompressType) + logCtx.WriteLog("DOWNLOAD", "Receiving compressed backup data (compression: %s)", downloadCompressType) + } else { + i18n.Printf("[backup-helper] Receiving backup data (no compression)...\n") + logCtx.WriteLog("DOWNLOAD", "Receiving uncompressed backup data") + } + i18n.Printf("[backup-helper] Extracting to directory: %s\n", targetDir) + logCtx.WriteLog("DOWNLOAD", "Extracting to directory: %s", targetDir) + + err := utils.ExtractBackupStream(reader, downloadCompressType, targetDir, outputPath, cfg.Parallel, cfg, logCtx) + if err != nil { + logCtx.WriteLog("EXTRACT", "Extraction error: %v", err) + // Read log content for error extraction + logContent, err2 := os.ReadFile(logCtx.GetFileName()) + if err2 == nil { + errorSummary := utils.ExtractErrorSummary("EXTRACT", string(logContent)) + if errorSummary != "" { + i18n.Printf("Extraction error. Error summary:\n%s\n", errorSummary) + } else { + i18n.Printf("Extraction error: %v\n", err) + } + } else { + i18n.Printf("Extraction error: %v\n", err) + } + i18n.Printf("Log file: %s\n", logCtx.GetFileName()) + + // Prompt for AI diagnosis + switch aiDiagnoseFlag { + case "on": + // When --ai-diagnose=on, ask user (unless -y is set) + if promptAIDiagnosis(autoYes) { + if cfg.QwenAPIKey == "" { + i18n.Printf("Qwen API Key is required for AI diagnosis. 
Please set it in config.\n") + os.Exit(1) + } + logContent, _ := os.ReadFile(logCtx.GetFileName()) + aiSuggestion, err := utils.DiagnoseWithAliQwen(cfg, "EXTRACT", string(logContent)) + if err != nil { + i18n.Printf("AI diagnosis failed: %v\n", err) + } else { + fmt.Print(color.YellowString(i18n.Sprintf("AI diagnosis suggestion:\n"))) + fmt.Println(color.YellowString(aiSuggestion)) + } + } + case "off": + // do nothing, skip ai diagnose + default: + // Default: off (skip AI diagnosis to avoid interrupting user workflow) + // do nothing + } + os.Exit(1) + } + i18n.Printf("[backup-helper] Extraction completed to: %s\n", targetDir) + logCtx.WriteLog("DOWNLOAD", "Extraction completed successfully") + logCtx.MarkSuccess() + i18n.Printf("[backup-helper] Log file: %s\n", logCtx.GetFileName()) + } else if outputPath == "-" { // Stream to stdout - set tracker to output progress to stderr if tracker != nil { tracker.SetOutputToStderr(true) } i18n.Fprintf(os.Stderr, "[backup-helper] Receiving backup data and streaming to stdout...\n") - // Apply rate limiting if configured - var reader io.Reader = receiver - if cfg.Traffic > 0 { - rateLimitedReader := utils.NewRateLimitedReader(receiver, cfg.Traffic) - reader = rateLimitedReader + + // If compression type is specified and outputting to stdout, handle decompression for piping + if downloadCompressType == "zstd" { + // Decompress zstd stream for piping to xbstream + decompressedReader, decompressCmd, err := utils.ExtractBackupStreamToStdout(reader, downloadCompressType, cfg.Parallel, logCtx) + if err != nil { + logCtx.WriteLog("DECOMPRESS", "Decompression error: %v", err) + i18n.Fprintf(os.Stderr, "Decompression error: %v\n", err) + os.Exit(1) + } + if decompressCmd != nil { + defer decompressCmd.Wait() + } + reader = decompressedReader + } else if downloadCompressType == "qp" { + logCtx.WriteLog("DOWNLOAD", "Warning: qpress compression cannot be stream-decompressed") + i18n.Fprintf(os.Stderr, "Warning: qpress compression cannot be stream-decompressed. 
Please save to file first.\n")
+			}
+
 			_, err = io.Copy(os.Stdout, reader)
 			if err != nil {
-				i18n.Fprintf(os.Stderr, "Download error: %v\n", err)
+				// Check if the error is related to connection interruption
+				errStr := err.Error()
+				if contains(errStr, "connection closed unexpectedly") || contains(errStr, "EOF") || contains(errStr, "broken pipe") {
+					logCtx.WriteLog("TCP", "Connection interrupted during transfer: %v", err)
+					i18n.Fprintf(os.Stderr, "Transfer interrupted: connection closed unexpectedly\n")
+					i18n.Fprintf(os.Stderr, "Error details: %v\n", err)
+				} else {
+					logCtx.WriteLog("DOWNLOAD", "Download error: %v", err)
+					i18n.Fprintf(os.Stderr, "Download error: %v\n", err)
+				}
+				i18n.Fprintf(os.Stderr, "Log file: %s\n", logCtx.GetFileName())
 				os.Exit(1)
 			}
 			// Progress tracker will display completion message via closer()
 		} else {
 			// Write to file
 			i18n.Printf("[backup-helper] Receiving backup data and saving to: %s\n", outputPath)
-			// Apply rate limiting if configured
-			var reader io.Reader = receiver
-			if cfg.Traffic > 0 {
-				rateLimitedReader := utils.NewRateLimitedReader(receiver, cfg.Traffic)
-				reader = rateLimitedReader
-			}
-			file, err := os.Create(outputPath)
-			if err != nil {
-				i18n.Printf("Failed to create output file: %v\n", err)
-				os.Exit(1)
-			}
-			defer file.Close()
+			logCtx.WriteLog("DOWNLOAD", "Saving backup data to: %s", outputPath)
+			if downloadCompressType == "zstd" {
+				// Save decompressed zstd stream
+				err := utils.ExtractBackupStream(reader, downloadCompressType, "", outputPath, cfg.Parallel, cfg, logCtx)
+				if err != nil {
+					logCtx.WriteLog("EXTRACT", "Save error: %v", err)
+					i18n.Printf("Save error: %v\n", err)
+					os.Exit(1)
+				}
+			} else {
+				// Save as-is
+				file, err := os.Create(outputPath)
+				if err != nil {
+					logCtx.WriteLog("DOWNLOAD", "Failed to create output file: %v", err)
+					i18n.Printf("Failed to create output file: %v\n", err)
+					os.Exit(1)
+				}
+				defer file.Close()

-			_, err = io.Copy(file, reader)
-			if err != nil {
-				i18n.Printf("Download error: %v\n", err)
-				os.Exit(1)
+				_, err = io.Copy(file, reader)
+				if err != nil {
+					logCtx.WriteLog("DOWNLOAD", "Failed to save backup data: %v", err)
+					i18n.Printf("Download error: %v\n", err)
+					os.Exit(1)
+				}
 			}
 			// Progress tracker will display completion message via closer()
-			i18n.Printf("[backup-helper] Saved to: %s\n", outputPath)
+			i18n.Printf("[backup-helper] Download completed! Saved to: %s\n", outputPath)
+			logCtx.WriteLog("DOWNLOAD", "Download completed successfully")
+			logCtx.MarkSuccess()
+			i18n.Printf("[backup-helper] Log file: %s\n", logCtx.GetFileName())
 		}
 		return
 	}

-	// 5. If --backup, run backup/upload
+	// 7. 
If --backup, run backup/upload if doBackup { + // Pre-check for backup mode + effectiveCompressType := compressType + if effectiveCompressType == "__NOT_SET__" { + effectiveCompressType = cfg.CompressType + } + if effectiveCompressType == "no" { + effectiveCompressType = "" + } + // MySQL param check (only needed for backup) if password == "" { i18n.Printf("Please input mysql-server password: ") @@ -265,59 +1021,112 @@ func main() { password = string(pwd) } + // Get MySQL connection for pre-check + var db *sql.DB + if host != "" && user != "" && password != "" { + db = utils.GetConnection(host, port, user, password) + defer db.Close() + } + + // Run pre-flight checks + // Note: In non-check mode, we don't check TCP connectivity to avoid blocking + backupResults := utils.CheckForBackupMode(cfg, effectiveCompressType, db, "", 0) + hasCriticalError := false + for _, result := range backupResults { + if result.Status == "ERROR" { + hasCriticalError = true + i18n.Printf("[ERROR] %s: %s - %s\n", result.Item, result.Value, result.Message) + } + } + if hasCriticalError { + i18n.Printf("\n[ERROR] Pre-flight checks failed. Please fix the errors above before proceeding.\n") + os.Exit(1) + } + i18n.Printf("connect to mysql-server host=%s port=%d user=%s\n", host, port, user) outputHeader() - db := utils.GetConnection(host, port, user, password) - defer db.Close() - options := utils.CollectVariableFromMySQLServer(db) - utils.Check(options, cfg) + // db may already be set from pre-check above + if db == nil { + db = utils.GetConnection(host, port, user, password) + defer db.Close() + } + // Note: Pre-check already performed MySQL parameter checks, so we skip CollectVariableFromMySQLServer and Check here + // to avoid duplicate output. cfg.MysqlVersion is already set by CheckForBackupMode. 
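+	// (CheckForBackupMode presumably gathers these values via CollectVariableFromMySQLServerSilent, added in utils/collect.go below, so they are still validated, just not printed a second time.)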
// Display IO limit after parameter check - if ioLimit == -1 { + if cfg.IOLimit == -1 { i18n.Printf("[backup-helper] Rate limiting disabled (unlimited speed)\n") - } else if ioLimit > 0 { - i18n.Printf("[backup-helper] IO rate limit set to: %s/s\n", formatBytes(ioLimit)) - } else if cfg.Traffic > 0 { + } else if cfg.IOLimit > 0 { + i18n.Printf("[backup-helper] IO rate limit set to: %s/s\n", formatBytes(cfg.IOLimit)) + } else { // Using default rate limit - i18n.Printf("[backup-helper] IO rate limit set to: %s/s (default)\n", formatBytes(cfg.Traffic)) + i18n.Printf("[backup-helper] IO rate limit set to: %s/s (default)\n", formatBytes(cfg.GetRateLimit())) + } + + // Check compression dependencies early (before starting backup) + // effectiveCompressType is already set in pre-check above + if effectiveCompressType == "" && cfg.CompressType != "" { + effectiveCompressType = cfg.CompressType + } + // Normalize: "no" or empty string means no compression + if effectiveCompressType == "no" { + effectiveCompressType = "" + } + if effectiveCompressType != "" { + if err := utils.CheckCompressionDependencies(effectiveCompressType, true, cfg); err != nil { + i18n.Printf("Error: %v\n", err) + os.Exit(1) + } } // Check xtrabackup version (run early) mysqlVer := cfg.MysqlVersion - utils.CheckXtraBackupVersion(mysqlVer) + utils.CheckXtraBackupVersion(mysqlVer, cfg) + + // Create log context + logCtx, err := utils.NewLogContext(cfg.LogDir, cfg.LogFileName) + if err != nil { + i18n.Printf("Failed to create log context: %v\n", err) + os.Exit(1) + } + defer logCtx.Close() i18n.Printf("[backup-helper] Running xtrabackup...\n") cfg.MysqlHost = host cfg.MysqlPort = port cfg.MysqlUser = user cfg.MysqlPassword = password + logCtx.WriteLog("BACKUP", "Starting backup operation") + logCtx.WriteLog("BACKUP", "MySQL host: %s, port: %d, user: %s", host, port, user) // 1. 
Decide objectName suffix and compression param ossObjectName := cfg.ObjectName objectSuffix := ".xb" - // compressType default is empty - if mode == "stream" { - cfg.Compress = false - cfg.CompressType = "" - objectSuffix = ".xb" - } else if cfg.Compress { - switch compressType { - case "zstd": - objectSuffix = ".xb.zst" - cfg.CompressType = "zstd" - default: - objectSuffix = "_qp.xb" - cfg.CompressType = "" - } - } else { + // Determine effective compression type (reuse variable from above) + if effectiveCompressType == "" && cfg.CompressType != "" { + effectiveCompressType = cfg.CompressType + } + // Normalize: "no" or empty string means no compression + if effectiveCompressType == "no" { + effectiveCompressType = "" + } + + // Set cfg.CompressType based on effectiveCompressType + cfg.CompressType = effectiveCompressType + switch effectiveCompressType { + case "zstd": + objectSuffix = ".xb.zst" + case "qp": + objectSuffix = "_qp.xb" + default: objectSuffix = ".xb" - cfg.CompressType = "" } timestamp := time.Now().Format("_20060102150405") fullObjectName := ossObjectName + timestamp + objectSuffix - reader, cmd, logFileName, err := utils.RunXtraBackup(cfg) + reader, cmd, err := utils.RunXtraBackup(cfg, db, logCtx) if err != nil { + logCtx.WriteLog("BACKUP", "Failed to start xtrabackup: %v", err) i18n.Printf("Run xtrabackup error: %v\n", err) os.Exit(1) } @@ -346,18 +1155,35 @@ func main() { switch mode { case "oss": i18n.Printf("[backup-helper] Uploading to OSS...\n") - err = utils.UploadReaderToOSS(cfg, fullObjectName, reader, totalSize) + logCtx.WriteLog("OSS", "Starting OSS upload") + isCompressed := cfg.CompressType != "" + err = utils.UploadReaderToOSS(cfg, fullObjectName, reader, totalSize, isCompressed, logCtx) if err != nil { + logCtx.WriteLog("OSS", "OSS upload failed: %v", err) i18n.Printf("OSS upload error: %v\n", err) cmd.Process.Kill() os.Exit(1) } + logCtx.WriteLog("OSS", "OSS upload completed successfully") + logCtx.MarkSuccess() case "stream": - // Only use config value if command line didn't specify and config has non-zero value - // streamPort 0 means auto-find available port - if streamPort == 0 && !isFlagPassed("stream-port") && cfg.StreamPort > 0 { - streamPort = cfg.StreamPort + // Parse stream-host from command line or config + if streamHost == "" && cfg.StreamHost != "" { + streamHost = cfg.StreamHost + } + + // Parse remote-output from command line or config (if exists) + if remoteOutput == "" && cfg.RemoteOutput != "" { + remoteOutput = cfg.RemoteOutput + } + + // Validate SSH mode requirements + if useSSH && streamHost == "" { + i18n.Printf("Error: --ssh requires --stream-host\n") + cmd.Process.Kill() + os.Exit(1) } + // handshake priority:command line > config > default if !isFlagPassed("enable-handshake") { enableHandshake = cfg.EnableHandshake @@ -365,24 +1191,117 @@ func main() { if streamKey == "" { streamKey = cfg.StreamKey } - // streamPort can be 0 now (auto-find available port) - tcpWriter, _, closer, actualPort, localIP, err := utils.StartStreamSender(streamPort, enableHandshake, streamKey, totalSize) - _ = actualPort // Port info already displayed in StartStreamSender - _ = localIP // IP info already displayed in StartStreamSender - if err != nil { - i18n.Printf("Stream server error: %v\n", err) - os.Exit(1) + + var writer io.WriteCloser + var closer func() + var err error + + if streamHost != "" { + if useSSH { + // SSH mode: Start receiver on remote via SSH + logCtx.WriteLog("SSH", "Starting remote receiver via SSH") + logCtx.WriteLog("SSH", "Remote 
host: %s", streamHost) + i18n.Printf("[backup-helper] Starting remote receiver via SSH on %s...\n", streamHost) + + // Use stream-port if specified, otherwise auto-find (0) + sshPort := streamPort + if !isFlagPassed("stream-port") && cfg.StreamPort > 0 { + sshPort = cfg.StreamPort + } + + remotePort, outputPath, _, sshCleanup, err := utils.StartRemoteReceiverViaSSH( + streamHost, sshPort, remoteOutput, totalSize, enableHandshake, streamKey) + if err != nil { + i18n.Printf("SSH receiver error: %v\n", err) + cmd.Process.Kill() + os.Exit(1) + } + + streamPort = remotePort + if sshPort > 0 { + i18n.Printf("[backup-helper] Remote receiver started on port %d via SSH\n", streamPort) + } else { + i18n.Printf("[backup-helper] Remote receiver started on auto-discovered port %d via SSH\n", streamPort) + } + + // Display remote output path (show what was specified, or indicate auto-generated) + if outputPath != "" { + i18n.Printf("[backup-helper] Remote backup will be saved to: %s\n", outputPath) + } else if remoteOutput != "" { + i18n.Printf("[backup-helper] Remote backup will be saved to: %s\n", remoteOutput) + } else { + i18n.Printf("[backup-helper] Remote backup will be saved to: auto-generated path (backup_YYYYMMDDHHMMSS.xb)\n") + } + + // Connect to remote receiver + isCompressed := cfg.CompressType != "" + writer, _, closer, _, err = utils.StartStreamClient( + streamHost, streamPort, enableHandshake, streamKey, totalSize, isCompressed, logCtx) + if err != nil { + sshCleanup() + i18n.Printf("Stream client error: %v\n", err) + cmd.Process.Kill() + os.Exit(1) + } + + // Wrap closer to cleanup SSH process + originalCloser := closer + closer = func() { + if originalCloser != nil { + originalCloser() + } + sshCleanup() + } + } else { + // Normal mode: Direct connection to specified port + if streamPort == 0 && !isFlagPassed("stream-port") { + if cfg.StreamPort > 0 { + streamPort = cfg.StreamPort + } else { + i18n.Printf("Error: --stream-port is required when using --stream-host\n") + cmd.Process.Kill() + os.Exit(1) + } + } + + isCompressed := cfg.CompressType != "" + writer, _, closer, _, err = utils.StartStreamClient( + streamHost, streamPort, enableHandshake, streamKey, totalSize, isCompressed, logCtx) + if err != nil { + i18n.Printf("Stream client error: %v\n", err) + cmd.Process.Kill() + os.Exit(1) + } + } + } else { + // Passive connection: listen locally and wait for connection + // streamPort can be 0 now (auto-find available port) + if streamPort == 0 && !isFlagPassed("stream-port") && cfg.StreamPort > 0 { + streamPort = cfg.StreamPort + } + + tcpWriter, _, closerFunc, actualPort, localIP, err := utils.StartStreamSender(streamPort, enableHandshake, streamKey, totalSize, cfg.CompressType != "", cfg.Timeout, logCtx) + _ = actualPort // Port info already displayed in StartStreamSender + _ = localIP // IP info already displayed in StartStreamSender + if err != nil { + i18n.Printf("Stream server error: %v\n", err) + cmd.Process.Kill() + os.Exit(1) + } + writer = tcpWriter + closer = closerFunc } defer closer() // Apply rate limiting for stream mode if configured - writer := tcpWriter - if cfg.Traffic > 0 { - rateLimitedWriter := utils.NewRateLimitedWriter(tcpWriter, cfg.Traffic) - writer = rateLimitedWriter + var finalWriter io.WriteCloser = writer + rateLimit := cfg.GetRateLimit() + if rateLimit > 0 { + rateLimitedWriter := utils.NewRateLimitedWriter(writer, rateLimit) + finalWriter = rateLimitedWriter } - _, err = io.Copy(writer, reader) + _, err = io.Copy(finalWriter, reader) if err != nil { 
i18n.Printf("TCP stream error: %v\n", err) cmd.Process.Kill() @@ -394,39 +1313,33 @@ func main() { } cmd.Wait() - // backup log file needs to be closed - utils.CloseBackupLogFile(cmd) + logCtx.WriteLog("BACKUP", "xtrabackup process completed") // Check backup log - logContent, err := os.ReadFile(logFileName) + logContent, err := os.ReadFile(logCtx.GetFileName()) if err != nil { + logCtx.WriteLog("BACKUP", "Failed to read log file: %v", err) i18n.Printf("Backup log read error.\n") os.Exit(1) } if !strings.Contains(string(logContent), "completed OK!") { - i18n.Printf("Backup failed (no 'completed OK!').\n") - i18n.Printf("You can check the backup log file for details: %s\n", logFileName) + logCtx.WriteLog("BACKUP", "Backup failed: no 'completed OK!' found in log") + errorSummary := utils.ExtractErrorSummary("BACKUP", string(logContent)) + if errorSummary != "" { + i18n.Printf("Backup failed. Error summary:\n%s\n", errorSummary) + } else { + i18n.Printf("Backup failed (no 'completed OK!').\n") + } + i18n.Printf("Log file: %s\n", logCtx.GetFileName()) switch aiDiagnoseFlag { case "on": - if cfg.QwenAPIKey == "" { - i18n.Printf("Qwen API Key is required for AI diagnosis. Please set it in config.\n") - os.Exit(1) - } - aiSuggestion, err := utils.DiagnoseWithAliQwen(cfg, string(logContent)) - if err != nil { - i18n.Printf("AI diagnosis failed: %v\n", err) - } else { - fmt.Print(color.YellowString(i18n.Sprintf("AI diagnosis suggestion:\n"))) - fmt.Println(color.YellowString(aiSuggestion)) - } - case "off": - // do nothing, skip ai diagnose - default: - var input string - i18n.Printf("Would you like to use AI diagnosis? (y/n): ") - fmt.Scanln(&input) - if input == "y" || input == "Y" || input == "yes" || input == "Yes" { - aiSuggestion, err := utils.DiagnoseWithAliQwen(cfg, string(logContent)) + // When --ai-diagnose=on, ask user (unless -y is set) + if promptAIDiagnosis(autoYes) { + if cfg.QwenAPIKey == "" { + i18n.Printf("Qwen API Key is required for AI diagnosis. 
Please set it in config.\n")
+						os.Exit(1)
+					}
+					aiSuggestion, err := utils.DiagnoseWithAliQwen(cfg, "BACKUP", string(logContent))
 					if err != nil {
 						i18n.Printf("AI diagnosis failed: %v\n", err)
 					} else {
@@ -434,32 +1347,51 @@ func main() {
 						fmt.Println(color.YellowString(aiSuggestion))
 					}
 				}
+			case "off":
+				// do nothing, skip ai diagnose
+			default:
+				// Default: off (skip AI diagnosis to avoid interrupting user workflow)
+				// do nothing
 			}
 			os.Exit(1)
 		}

+		// Ensure a newline before completion message (in case progress tracker didn't clear properly)
+		fmt.Print("\n")
+		logCtx.WriteLog("BACKUP", "Backup completed successfully")
+		logCtx.MarkSuccess()
 		i18n.Printf("[backup-helper] Backup and upload completed!\n")
+		i18n.Printf("[backup-helper] Log file: %s\n", logCtx.GetFileName())
 		return
 	} else if existedBackup != "" {
+		// Create log context for existed backup
+		logCtx, err := utils.NewLogContext(cfg.LogDir, cfg.LogFileName)
+		if err != nil {
+			i18n.Printf("Failed to create log context: %v\n", err)
+			os.Exit(1)
+		}
+		defer logCtx.Close()
+
 		// upload existed backup file to OSS or stream via TCP
+		logCtx.WriteLog("BACKUP", "Processing existing backup file")
 		i18n.Printf("[backup-helper] Processing existing backup file...\n")

 		// Validate backup file before processing
 		var backupInfo *utils.BackupFileInfo
-		var err error
+		var err2 error
 		if existedBackup == "-" {
 			// Validate data from stdin
-			backupInfo, err = utils.ValidateBackupFileFromStdin()
-			if err != nil {
-				i18n.Printf("Validation error: %v\n", err)
+			backupInfo, err2 = utils.ValidateBackupFileFromStdin()
+			if err2 != nil {
+				i18n.Printf("Validation error: %v\n", err2)
 				os.Exit(1)
 			}
 			utils.PrintBackupFileValidationFromStdin(backupInfo)
 		} else {
 			// Validate file
-			backupInfo, err = utils.ValidateBackupFile(existedBackup)
-			if err != nil {
-				i18n.Printf("Validation error: %v\n", err)
+			backupInfo, err2 = utils.ValidateBackupFile(existedBackup)
+			if err2 != nil {
+				i18n.Printf("Validation error: %v\n", err2)
 				os.Exit(1)
 			}
 			utils.PrintBackupFileValidation(existedBackup, backupInfo)
@@ -472,13 +1404,13 @@ func main() {
 		}

 		// Display IO limit after validation
-		if ioLimit == -1 {
+		if cfg.IOLimit == -1 {
 			i18n.Printf("[backup-helper] Rate limiting disabled (unlimited speed)\n")
-		} else if ioLimit > 0 {
-			i18n.Printf("[backup-helper] IO rate limit set to: %s/s\n", formatBytes(ioLimit))
-		} else if cfg.Traffic > 0 {
+		} else if cfg.IOLimit > 0 {
+			i18n.Printf("[backup-helper] IO rate limit set to: %s/s\n", formatBytes(cfg.IOLimit))
+		} else {
 			// Using default rate limit
-			i18n.Printf("[backup-helper] IO rate limit set to: %s/s (default)\n", formatBytes(cfg.Traffic))
+			i18n.Printf("[backup-helper] IO rate limit set to: %s/s (default)\n", formatBytes(cfg.GetRateLimit()))
 		}

 		// Get reader from existing backup file or stdin
@@ -502,22 +1434,25 @@ func main() {
 		// Determine object name suffix based on compression type
 		ossObjectName := cfg.ObjectName
 		objectSuffix := ".xb"
-		if mode == "stream" {
-			cfg.Compress = false
-			cfg.CompressType = ""
-			objectSuffix = ".xb"
-		} else if cfg.Compress {
-			switch compressType {
-			case "zstd":
-				objectSuffix = ".xb.zst"
-				cfg.CompressType = "zstd"
-			default:
-				objectSuffix = "_qp.xb"
-				cfg.CompressType = ""
-			}
-		} else {
+		// Determine effective compression type (normalize the __NOT_SET__ sentinel first,
+		// as the backup and download paths do)
+		effectiveCompressType := compressType
+		if effectiveCompressType == "__NOT_SET__" {
+			effectiveCompressType = cfg.CompressType
+		}
+		// Normalize: "no" or empty string means no compression
+		if effectiveCompressType == "no" {
+			effectiveCompressType = ""
+ } + + // Set cfg.CompressType based on effectiveCompressType + cfg.CompressType = effectiveCompressType + switch effectiveCompressType { + case "zstd": + objectSuffix = ".xb.zst" + case "qp": + objectSuffix = "_qp.xb" + default: objectSuffix = ".xb" - cfg.CompressType = "" } timestamp := time.Now().Format("_20060102150405") fullObjectName := ossObjectName + timestamp + objectSuffix @@ -544,17 +1479,48 @@ func main() { switch mode { case "oss": i18n.Printf("[backup-helper] Uploading existing backup to OSS...\n") - err := utils.UploadReaderToOSS(cfg, fullObjectName, reader, totalSize) + isCompressed := cfg.CompressType != "" + err := utils.UploadReaderToOSS(cfg, fullObjectName, reader, totalSize, isCompressed, logCtx) if err != nil { i18n.Printf("OSS upload error: %v\n", err) os.Exit(1) } i18n.Printf("[backup-helper] OSS upload completed!\n") + logCtx.MarkSuccess() case "stream": + // Parse stream-host from command line or config + if streamHost == "" && cfg.StreamHost != "" { + streamHost = cfg.StreamHost + } + // Only use config value if command line didn't specify and config has non-zero value - // streamPort 0 means auto-find available port - if streamPort == 0 && !isFlagPassed("stream-port") && cfg.StreamPort > 0 { - streamPort = cfg.StreamPort + // streamPort 0 means auto-find available port (only when not using stream-host) + if streamHost == "" { + if streamPort == 0 && !isFlagPassed("stream-port") && cfg.StreamPort > 0 { + streamPort = cfg.StreamPort + } + // Show equivalent command (before starting server, so we show original port) + equivalentSource := existedBackup + if existedBackup == "-" { + equivalentSource = "stdin" + } + if streamPort > 0 { + i18n.Printf("[backup-helper] Starting TCP stream server on port %d...\n", streamPort) + i18n.Printf("[backup-helper] Equivalent command: cat %s | nc -l4 %d\n", + equivalentSource, streamPort) + } else { + i18n.Printf("[backup-helper] Starting TCP stream server (auto-find available port)...\n") + } + } else { + // When using stream-host, port is required + if streamPort == 0 && !isFlagPassed("stream-port") { + if cfg.StreamPort > 0 { + streamPort = cfg.StreamPort + } else { + i18n.Printf("Error: --stream-port is required when using --stream-host\n") + os.Exit(1) + } + } } // handshake priority:command line > config > default @@ -565,46 +1531,53 @@ func main() { streamKey = cfg.StreamKey } - // streamPort can be 0 now (auto-find available port) - // Show equivalent command (before starting server, so we show original port) - equivalentSource := existedBackup - if existedBackup == "-" { - equivalentSource = "stdin" - } - if streamPort > 0 { - i18n.Printf("[backup-helper] Starting TCP stream server on port %d...\n", streamPort) - i18n.Printf("[backup-helper] Equivalent command: cat %s | nc -l4 %d\n", - equivalentSource, streamPort) - } else { - i18n.Printf("[backup-helper] Starting TCP stream server (auto-find available port)...\n") - } + var writer io.WriteCloser + var closer func() + var err error - tcpWriter, _, closer, actualPort, localIP, err := utils.StartStreamSender(streamPort, enableHandshake, streamKey, totalSize) - _ = actualPort // Port info already displayed in StartStreamSender - _ = localIP // IP info already displayed in StartStreamSender - if err != nil { - i18n.Printf("Stream server error: %v\n", err) - os.Exit(1) + if streamHost != "" { + // Active connection: connect to remote server + logCtx.WriteLog("TCP", "Active push mode: connecting to %s:%d", streamHost, streamPort) + isCompressed := cfg.CompressType != "" + 
writer, _, closer, _, err = utils.StartStreamClient(streamHost, streamPort, enableHandshake, streamKey, totalSize, isCompressed, logCtx) + if err != nil { + i18n.Printf("Stream client error: %v\n", err) + os.Exit(1) + } + } else { + // Passive connection: listen locally and wait for connection + // streamPort can be 0 now (auto-find available port) + tcpWriter, _, closerFunc, actualPort, localIP, err := utils.StartStreamSender(streamPort, enableHandshake, streamKey, totalSize, cfg.CompressType != "", cfg.Timeout, logCtx) + _ = actualPort // Port info already displayed in StartStreamSender + _ = localIP // IP info already displayed in StartStreamSender + if err != nil { + i18n.Printf("Stream server error: %v\n", err) + os.Exit(1) + } + writer = tcpWriter + closer = closerFunc } defer closer() // Apply rate limiting for stream mode if configured - writer := tcpWriter - if cfg.Traffic > 0 { - rateLimitedWriter := utils.NewRateLimitedWriter(tcpWriter, cfg.Traffic) - writer = rateLimitedWriter + var finalWriter io.WriteCloser = writer + rateLimit := cfg.GetRateLimit() + if rateLimit > 0 { + rateLimitedWriter := utils.NewRateLimitedWriter(writer, rateLimit) + finalWriter = rateLimitedWriter } // Stream the backup data i18n.Printf("[backup-helper] Streaming backup data...\n") - _, err = io.Copy(writer, reader) + _, err = io.Copy(finalWriter, reader) if err != nil { i18n.Printf("TCP stream error: %v\n", err) os.Exit(1) } i18n.Printf("[backup-helper] Stream completed!\n") + logCtx.MarkSuccess() default: i18n.Printf("Unknown mode: %s\n", mode) os.Exit(1) diff --git a/mysql-backup-helper b/mysql-backup-helper new file mode 100755 index 0000000..87bd21f Binary files /dev/null and b/mysql-backup-helper differ diff --git a/utils/ai_diag.go b/utils/ai_diag.go index 05c7087..0f11b7f 100644 --- a/utils/ai_diag.go +++ b/utils/ai_diag.go @@ -10,7 +10,9 @@ import ( ) // DiagnoseWithAliQwen call qwen-max-latest model to diagnose the log content -func DiagnoseWithAliQwen(cfg *Config, logContent string) (string, error) { +// module: module type (BACKUP, PREPARE, TCP, OSS, DECOMPRESS, EXTRACT, XBSTREAM) +// logContent: log content to diagnose +func DiagnoseWithAliQwen(cfg *Config, module string, logContent string) (string, error) { if cfg.QwenAPIKey == "" { return "", errors.New("DashScope API Key is not set in config") } @@ -18,11 +20,15 @@ func DiagnoseWithAliQwen(cfg *Config, logContent string) (string, error) { option.WithAPIKey(cfg.QwenAPIKey), option.WithBaseURL("https://dashscope.aliyuncs.com/compatible-mode/v1/"), ) + + // Get module-specific prompt + prompt := getDiagnosisPrompt(module) + chatCompletion, err := client.Chat.Completions.New( context.TODO(), openai.ChatCompletionNewParams{ Messages: openai.F( []openai.ChatCompletionMessageParamUnion{ - openai.SystemMessage(i18n.Sprintf("AI_DIAG_PROMPT")), + openai.SystemMessage(prompt), openai.UserMessage(logContent), }, ), @@ -34,3 +40,21 @@ func DiagnoseWithAliQwen(cfg *Config, logContent string) (string, error) { } return chatCompletion.Choices[0].Message.Content, nil } + +// getDiagnosisPrompt returns module-specific diagnosis prompt +func getDiagnosisPrompt(module string) string { + switch module { + case "BACKUP": + return i18n.Sprintf("AI_DIAG_PROMPT_BACKUP") + case "PREPARE": + return i18n.Sprintf("AI_DIAG_PROMPT_PREPARE") + case "TCP": + return i18n.Sprintf("AI_DIAG_PROMPT_TCP") + case "OSS": + return i18n.Sprintf("AI_DIAG_PROMPT_OSS") + case "DECOMPRESS", "EXTRACT", "XBSTREAM": + return i18n.Sprintf("AI_DIAG_PROMPT_EXTRACT") + default: + return 
i18n.Sprintf("AI_DIAG_PROMPT") + } +} diff --git a/utils/backup.go b/utils/backup.go index f4bb3b7..11723cd 100644 --- a/utils/backup.go +++ b/utils/backup.go @@ -1,6 +1,7 @@ package utils import ( + "database/sql" "fmt" "io" "os" @@ -8,17 +9,11 @@ import ( "path/filepath" "sort" "strings" - "time" + "syscall" "github.com/gioco-play/easy-i18n/i18n" ) -// Config adds Compress field -// type Config struct { -// ... -// Compress bool `json:"compress"` -// } - func ensureLogsDir(logDir string) error { // if log directory is relative path, make it relative to current working directory if !filepath.IsAbs(logDir) { @@ -37,13 +32,8 @@ func ensureLogsDir(logDir string) error { return nil } -func getLogFileName(logDir string) string { - timestamp := time.Now().Format("20060102150405") - return filepath.Join(logDir, fmt.Sprintf("xtrabackup-%s.log", timestamp)) -} - func cleanOldLogs(logDir string, keep int) error { - pattern := filepath.Join(logDir, "xtrabackup-*.log") + pattern := filepath.Join(logDir, "backup-helper-*.log") files, err := filepath.Glob(pattern) if err != nil { return fmt.Errorf("failed to glob log files: %v", err) @@ -74,12 +64,28 @@ func cleanOldLogs(logDir string, keep int) error { return nil } -// RunXtraBackup calls xtrabackup, returns backup data io.Reader, cmd, log file name and error -func RunXtraBackup(cfg *Config) (io.Reader, *exec.Cmd, string, error) { - if err := ensureLogsDir(cfg.LogDir); err != nil { - return nil, nil, "", err +// RunXtraBackup calls xtrabackup, returns backup data io.Reader, cmd and error +// db is used to get MySQL config file path and must be a valid MySQL connection +// logCtx is used to write logs for backup operations +func RunXtraBackup(cfg *Config, db *sql.DB, logCtx *LogContext) (io.Reader, *exec.Cmd, error) { + if logCtx == nil { + return nil, nil, fmt.Errorf("log context is required") + } + + // Resolve xtrabackup and xbstream paths + xtrabackupPath, _, err := ResolveXtrabackupPath(cfg, true) + if err != nil { + return nil, nil, err + } + + // Check for MySQL config file first (must be first argument if present) + // Only use defaults-file if explicitly specified by user (via --defaults-file or config) + // We do NOT auto-detect to avoid using wrong config file (e.g., from another MySQL instance) + var defaultsFile string + if cfg.DefaultsFile != "" { + // Use explicitly specified defaults-file + defaultsFile = cfg.DefaultsFile } - cleanOldLogs(cfg.LogDir, 10) args := []string{ "--backup", @@ -88,9 +94,10 @@ func RunXtraBackup(cfg *Config) (io.Reader, *exec.Cmd, string, error) { fmt.Sprintf("--user=%s", cfg.MysqlUser), fmt.Sprintf("--password=%s", cfg.MysqlPassword), "--stream=xbstream", + "--slave-info", // Record master binary log position for replication setup "--backup-lock-timeout=120", "--backup-lock-retry-count=0", - "--close-files=0", + "--close-files=1", // Enable close-files to handle large number of tables "--ftwrl-wait-timeout=60", "--ftwrl-wait-threshold=60", "--ftwrl-wait-query-type=ALL", @@ -99,32 +106,60 @@ func RunXtraBackup(cfg *Config) (io.Reader, *exec.Cmd, string, error) { "--lock-ddl=0", } + // Prepend --defaults-file if config file is found (must be first argument) + if defaultsFile != "" { + args = append([]string{fmt.Sprintf("--defaults-file=%s", defaultsFile)}, args...) 
+ } + + // Add --parallel (default is 4) + parallel := cfg.Parallel + if parallel == 0 { + parallel = 4 + } + args = append(args, fmt.Sprintf("--parallel=%d", parallel)) + + // Set ulimit for file descriptors (655360) + // Set the limit for current process, child processes will inherit + var rlimit syscall.Rlimit + if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit); err == nil { + if rlimit.Cur < 655360 { + rlimit.Cur = 655360 + if rlimit.Max < 655360 { + rlimit.Max = 655360 + } + // Try to set the limit (may fail if not enough privileges) + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rlimit) + } + } + var cmd *exec.Cmd if cfg.CompressType == "zstd" { // Check zstd dependency if _, err := exec.LookPath("zstd"); err != nil { - return nil, nil, "", fmt.Errorf("%s", i18n.Sprintf("zstd command not found. Please install zstd: https://github.com/facebook/zstd")) + return nil, nil, fmt.Errorf("%s", i18n.Sprintf("zstd command not found. Please install zstd: https://github.com/facebook/zstd")) + } + // Get parallel value for zstd compression + parallel := cfg.Parallel + if parallel == 0 { + parallel = 4 } // Print equivalent shell command - cmdStr := "xtrabackup " + strings.Join(args, " ") + " | zstd -q -" + cmdStr := fmt.Sprintf("%s %s | zstd -q -T%d -", xtrabackupPath, strings.Join(args, " "), parallel) i18n.Printf("Equivalent shell command: %s\n", cmdStr) - // Use pipe method: xtrabackup ... | zstd - xtrabackupCmd := exec.Command("xtrabackup", args...) - zstdCmd := exec.Command("zstd", "-q", "-") + logCtx.WriteLog("BACKUP", "Starting xtrabackup backup with zstd compression") + logCtx.WriteLog("BACKUP", "Command: %s", cmdStr) + // Use pipe method: xtrabackup ... | zstd -T + xtrabackupCmd := exec.Command(xtrabackupPath, args...) + zstdCmd := exec.Command("zstd", "-q", fmt.Sprintf("-T%d", parallel), "-") - logFileName := getLogFileName(cfg.LogDir) - logFile, err := os.Create(logFileName) - if err != nil { - return nil, nil, "", err - } - xtrabackupCmd.Stderr = logFile - zstdCmd.Stderr = logFile + xtrabackupCmd.Stderr = logCtx.GetFile() + zstdCmd.Stderr = logCtx.GetFile() // Connect pipe pipe, err := xtrabackupCmd.StdoutPipe() if err != nil { - logFile.Close() - return nil, nil, "", err + logCtx.WriteLog("BACKUP", "Failed to create pipe: %v", err) + return nil, nil, err } zstdCmd.Stdin = pipe @@ -135,59 +170,134 @@ func RunXtraBackup(cfg *Config) (io.Reader, *exec.Cmd, string, error) { // Start xtrabackup if err := xtrabackupCmd.Start(); err != nil { - logFile.Close() - return nil, nil, "", err + logCtx.WriteLog("BACKUP", "Failed to start xtrabackup: %v", err) + return nil, nil, err } stdout, err := cmd.StdoutPipe() if err != nil { - logFile.Close() - return nil, nil, "", err + logCtx.WriteLog("BACKUP", "Failed to create stdout pipe: %v", err) + return nil, nil, err } if err := cmd.Start(); err != nil { - logFile.Close() - return nil, nil, "", err + logCtx.WriteLog("BACKUP", "Failed to start zstd: %v", err) + return nil, nil, err } - // Note: Caller needs to logFile.Close() after cmd.Wait() - return stdout, cmd, logFileName, nil + logCtx.WriteLog("BACKUP", "xtrabackup and zstd processes started successfully") + return stdout, cmd, nil } // Non-zstd branch, always assign cmd - if cfg.Compress { + // Use cfg.CompressType == "qp" to determine if we need --compress + if cfg.CompressType == "qp" { args = append(args, "--compress") + // Add --compress-threads for parallel compression + args = append(args, fmt.Sprintf("--compress-threads=%d", parallel)) } - cmd = exec.Command("xtrabackup", args...) 
+ cmd = exec.Command(xtrabackupPath, args...) - cmdStr := "xtrabackup " + strings.Join(args, " ") + cmdStr := xtrabackupPath + " " + strings.Join(args, " ") i18n.Printf("Equivalent shell command: %s\n", cmdStr) - - logFileName := getLogFileName(cfg.LogDir) - logFile, err := os.Create(logFileName) - if err != nil { - return nil, nil, "", err + logCtx.WriteLog("BACKUP", "Starting xtrabackup backup") + if cfg.CompressType == "qp" { + logCtx.WriteLog("BACKUP", "Using qpress compression") + } else { + logCtx.WriteLog("BACKUP", "No compression") } - cmd.Stderr = logFile + logCtx.WriteLog("BACKUP", "Command: %s", cmdStr) + cmd.Stderr = logCtx.GetFile() stdout, err := cmd.StdoutPipe() if err != nil { - logFile.Close() - return nil, nil, "", err + logCtx.WriteLog("BACKUP", "Failed to create stdout pipe: %v", err) + return nil, nil, err } if err := cmd.Start(); err != nil { - logFile.Close() - return nil, nil, "", err + logCtx.WriteLog("BACKUP", "Failed to start xtrabackup: %v", err) + return nil, nil, err } - return stdout, cmd, logFileName, nil + logCtx.WriteLog("BACKUP", "xtrabackup process started successfully") + return stdout, cmd, nil } -// CloseBackupLogFile closes cmd's Stderr log file (if it's *os.File) -func CloseBackupLogFile(cmd *exec.Cmd) { - if cmd == nil || cmd.Stderr == nil { - return +// RunXtrabackupPrepare executes xtrabackup --prepare on a backup directory +// targetDir: directory containing the backup to prepare +// cfg: configuration containing parallel and useMemory settings +// db: optional MySQL connection for getting defaults-file (can be nil for prepare) +// logCtx: log context for writing logs +func RunXtrabackupPrepare(cfg *Config, targetDir string, db *sql.DB, logCtx *LogContext) (*exec.Cmd, error) { + if logCtx == nil { + return nil, fmt.Errorf("log context is required") + } + + // Resolve xtrabackup path + xtrabackupPath, _, err := ResolveXtrabackupPath(cfg, false) + if err != nil { + return nil, err + } + + // Check for MySQL config file first (must be first argument if present) + // Only use defaults-file if explicitly specified by user (via --defaults-file or config) + // We do NOT auto-detect to avoid using wrong config file (e.g., from another MySQL instance) + var defaultsFile string + if cfg.DefaultsFile != "" { + // Use explicitly specified defaults-file + defaultsFile = cfg.DefaultsFile + } + + args := []string{ + "--prepare", + fmt.Sprintf("--target-dir=%s", targetDir), + } + + // Prepend --defaults-file if config file is found (must be first argument) + if defaultsFile != "" { + args = append([]string{fmt.Sprintf("--defaults-file=%s", defaultsFile)}, args...) + } + + // Add --parallel (default is 4) + parallel := cfg.Parallel + if parallel == 0 { + parallel = 4 } - if f, ok := cmd.Stderr.(*os.File); ok { - f.Close() + args = append(args, fmt.Sprintf("--parallel=%d", parallel)) + + // Add --use-memory (default is 1G) + useMemory := cfg.UseMemory + if useMemory == "" { + useMemory = "1G" + } + args = append(args, fmt.Sprintf("--use-memory=%s", useMemory)) + + // Set ulimit for file descriptors (655360) + var rlimit syscall.Rlimit + if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit); err == nil { + if rlimit.Cur < 655360 { + rlimit.Cur = 655360 + if rlimit.Max < 655360 { + rlimit.Max = 655360 + } + // Try to set the limit (may fail if not enough privileges) + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rlimit) + } + } + + cmd := exec.Command(xtrabackupPath, args...) 
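+	// Note: unlike the streaming backup path, --prepare produces no data stream, so both
+	// stdout and stderr of xtrabackup are redirected to the log file below.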
+ + cmdStr := xtrabackupPath + " " + strings.Join(args, " ") + i18n.Printf("Equivalent shell command: %s\n", cmdStr) + logCtx.WriteLog("PREPARE", "Starting xtrabackup prepare") + logCtx.WriteLog("PREPARE", "Target directory: %s", targetDir) + logCtx.WriteLog("PREPARE", "Command: %s", cmdStr) + cmd.Stderr = logCtx.GetFile() + cmd.Stdout = logCtx.GetFile() + + if err := cmd.Start(); err != nil { + logCtx.WriteLog("PREPARE", "Failed to start xtrabackup prepare: %v", err) + return nil, err } + logCtx.WriteLog("PREPARE", "xtrabackup prepare process started successfully") + return cmd, nil } diff --git a/utils/checker.go b/utils/checker.go index db4fb06..0a828cd 100644 --- a/utils/checker.go +++ b/utils/checker.go @@ -117,8 +117,16 @@ func getVersion(value string) Version { return Version{major, minor, micro} } -func CheckXtraBackupVersion(mysqlVer Version) { - cmd := exec.Command("xtrabackup", "--version") +func CheckXtraBackupVersion(mysqlVer Version, cfg *Config) { + // Resolve xtrabackup path + xtrabackupPath, _, err := ResolveXtrabackupPath(cfg, false) + if err != nil { + msg := fmt.Sprintf("[Error] Cannot resolve xtrabackup path: %v", err) + i18n.Printf(color.RedString("%s\n", msg)) + return + } + + cmd := exec.Command(xtrabackupPath, "--version") out, err := cmd.CombinedOutput() if err != nil { msg := i18n.Sprintf("[Error] Cannot execute xtrabackup --version, please confirm that Percona XtraBackup is installed and in PATH") @@ -165,8 +173,14 @@ func CheckXtraBackupVersion(mysqlVer Version) { } // GetXtrabackupVersion Extract xtrabackup major.minor.patch-revision four-part version number -func GetXtrabackupVersion() [4]int { - cmd := exec.Command("xtrabackup", "--version") +func GetXtrabackupVersion(cfg *Config) [4]int { + // Resolve xtrabackup path + xtrabackupPath, _, err := ResolveXtrabackupPath(cfg, false) + if err != nil { + return [4]int{0, 0, 0, 0} + } + + cmd := exec.Command(xtrabackupPath, "--version") out, err := cmd.CombinedOutput() if err != nil { return [4]int{0, 0, 0, 0} diff --git a/utils/collect.go b/utils/collect.go index 7b07d42..5cf0683 100644 --- a/utils/collect.go +++ b/utils/collect.go @@ -7,6 +7,11 @@ import ( ) func CollectVariableFromMySQLServer(db *sql.DB) map[string]string { + return CollectVariableFromMySQLServerSilent(db, false) +} + +// CollectVariableFromMySQLServerSilent collects MySQL variables, with optional silent mode +func CollectVariableFromMySQLServerSilent(db *sql.DB, silent bool) map[string]string { items := []string{ "version", "gtid_mode", @@ -16,12 +21,18 @@ func CollectVariableFromMySQLServer(db *sql.DB) map[string]string { "log_bin", } result := make(map[string]string) - i18n.Printf("Get parameter for checking...\n") + if !silent { + i18n.Printf("Get parameter for checking...\n") + } for _, item := range items { val := GetMySQLVariable(db, item) - i18n.Printf("\t%s=%s\n", item, val) + if !silent { + i18n.Printf("\t%s=%s\n", item, val) + } result[item] = val } - i18n.Printf("\n") + if !silent { + i18n.Printf("\n") + } return result } diff --git a/utils/compression_check.go b/utils/compression_check.go new file mode 100644 index 0000000..6bd1d0f --- /dev/null +++ b/utils/compression_check.go @@ -0,0 +1,121 @@ +package utils + +import ( + "fmt" + "os/exec" + + "github.com/gioco-play/easy-i18n/i18n" +) + +// checkToolExecutable checks if a tool is executable using multiple fallback methods +// This provides better compatibility with older versions that may not support --version +func checkToolExecutable(toolName string, toolPath string) error { + 
// Try multiple methods to verify it's executable + // Method 1: Try --version + cmd := exec.Command(toolPath, "--version") + if err := cmd.Run(); err == nil { + return nil + } + + // Method 2: Try -h + cmd = exec.Command(toolPath, "-h") + if err := cmd.Run(); err == nil { + return nil + } + + // Method 3: Try --help + cmd = exec.Command(toolPath, "--help") + if err := cmd.Run(); err == nil { + return nil + } + + // Method 4: Try without arguments (will show usage) + cmd = exec.Command(toolPath) + err := cmd.Run() + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + // Exit code 1 or 2 usually means "usage" or "missing arguments" + // This is acceptable - it proves the tool is executable + if exitErr.ExitCode() == 1 || exitErr.ExitCode() == 2 { + return nil // Success - tool is executable + } + } + return fmt.Errorf("%s command found but not executable: %v", toolName, err) + } + + return nil +} + +// CheckCompressionDependencies checks if required tools are available for the specified compression type +// For backup mode: zstd needs zstd tool, qp needs qpress tool (xtrabackup --compress uses qpress internally) +// For download mode: both zstd and qp need external tools for decompression +// isBackupMode: true for backup mode, false for download mode +// cfg: configuration for resolving xtrabackup/xbstream paths +// Returns error if tool is not found or not executable +func CheckCompressionDependencies(compressType string, isBackupMode bool, cfg *Config) error { + switch compressType { + case "zstd": + zstdPath, err := exec.LookPath("zstd") + if err != nil { + return fmt.Errorf("%s", i18n.Sprintf("zstd command not found. Please install zstd: https://github.com/facebook/zstd")) + } + // Test if zstd is executable using multiple fallback methods + if err := checkToolExecutable("zstd", zstdPath); err != nil { + return fmt.Errorf("%s", i18n.Sprintf("zstd command found but not executable. Please check installation")) + } + return nil + case "qp": + // For qpress compression, we need qpress tool + // Even though xtrabackup --compress uses qpress internally, we still need qpress tool for backup mode + qpressPath, err := exec.LookPath("qpress") + if err != nil { + return fmt.Errorf("%s", i18n.Sprintf("qpress command not found. Please install qpress: https://github.com/mariadb-corporation/qpress")) + } + // Test if qpress is executable using multiple fallback methods + if err := checkToolExecutable("qpress", qpressPath); err != nil { + return fmt.Errorf("%s", i18n.Sprintf("qpress command found but not executable. 
Please check installation")) + } + if !isBackupMode { + // For download mode, also check xbstream and xtrabackup + _, _, err := ResolveXtrabackupPath(cfg, true) + if err != nil { + return err + } + } + return nil + case "": + // No compression, no dependencies needed + return nil + default: + return fmt.Errorf("unknown compression type: %s", compressType) + } +} + +// CheckExtractionDependencies checks if required tools are available for extraction +// This is used in download mode when --target-dir is specified +func CheckExtractionDependencies(compressType string, cfg *Config) error { + switch compressType { + case "zstd": + // Need zstd for decompression and xbstream for extraction + if err := CheckCompressionDependencies("zstd", false, cfg); err != nil { + return err + } + _, _, err := ResolveXtrabackupPath(cfg, true) + if err != nil { + return err + } + return nil + case "qp": + // Need qpress, xbstream, and xtrabackup + return CheckCompressionDependencies("qp", false, cfg) + case "": + // No compression, only need xbstream + _, _, err := ResolveXtrabackupPath(cfg, true) + if err != nil { + return err + } + return nil + default: + return fmt.Errorf("unknown compression type: %s", compressType) + } +} diff --git a/utils/config.go b/utils/config.go index 3c41993..965d24b 100644 --- a/utils/config.go +++ b/utils/config.go @@ -14,24 +14,30 @@ type Config struct { ObjectName string `json:"objectName"` Size int `json:"size"` Buffer int `json:"buffer"` - Traffic int64 `json:"traffic"` MysqlHost string `json:"mysqlHost"` MysqlPort int `json:"mysqlPort"` MysqlUser string `json:"mysqlUser"` MysqlPassword string `json:"mysqlPassword"` - Compress bool `json:"compress"` CompressType string `json:"compressType"` Mode string `json:"mode"` StreamPort int `json:"streamPort"` + StreamHost string `json:"streamHost"` MysqlVersion Version `json:"mysqlVersion"` QwenAPIKey string `json:"qwenAPIKey"` EnableHandshake bool `json:"enableHandshake"` StreamKey string `json:"streamKey"` ExistedBackup string `json:"existedBackup"` LogDir string `json:"logDir"` + LogFileName string `json:"logFileName"` EstimatedSize int64 `json:"estimatedSize"` IOLimit int64 `json:"ioLimit"` DownloadOutput string `json:"downloadOutput"` + RemoteOutput string `json:"remoteOutput"` + Parallel int `json:"parallel"` + UseMemory string `json:"useMemory"` + XtrabackupPath string `json:"xtrabackupPath"` + DefaultsFile string `json:"defaultsFile"` + Timeout int `json:"timeout"` // TCP connection timeout in seconds (default: 60, max: 3600) } func LoadConfig(path string) (*Config, error) { @@ -51,9 +57,6 @@ func (c *Config) SetDefaults() { if c.Buffer == 0 { c.Buffer = 10 } - if c.Traffic == 0 { - c.Traffic = 209715200 // 200MB/s - } // Note: StreamPort 0 means auto-find available port, don't set default to 9999 if c.MysqlPort == 0 { c.MysqlPort = 3306 @@ -61,4 +64,31 @@ func (c *Config) SetDefaults() { if c.LogDir == "" { c.LogDir = "/var/log/mysql-backup-helper" } + if c.Parallel == 0 { + c.Parallel = 4 // Default parallel threads for xtrabackup + } + if c.UseMemory == "" { + c.UseMemory = "1G" // Default memory for prepare operation + } + if c.Timeout == 0 { + c.Timeout = 60 // Default TCP connection timeout: 60 seconds + } + // Enforce maximum timeout: 3600 seconds (1 hour) + if c.Timeout > 3600 { + c.Timeout = 3600 + } +} + +// GetRateLimit returns the actual rate limit value for rate limiting +// -1 means unlimited (returns 0) +// 0 means use default (returns 200MB/s) +// > 0 means use the specified value +func (c *Config) 
GetRateLimit() int64 { + if c.IOLimit == -1 { + return 0 // 0 means unlimited for rate limiters + } + if c.IOLimit == 0 { + return 209715200 // Default: 200MB/s + } + return c.IOLimit } diff --git a/utils/datadir.go b/utils/datadir.go index f68a652..5b8d70a 100644 --- a/utils/datadir.go +++ b/utils/datadir.go @@ -51,8 +51,9 @@ func CalculateBackupSize(datadir string) (int64, error) { shouldBackup = true } + // binlog and relay log are NOT backed up by xtrabackup, so exclude them if strings.HasPrefix(base, "binlog") || strings.HasPrefix(base, "relay-bin") { - shouldBackup = true + shouldBackup = false } if shouldBackup { diff --git a/utils/extract.go b/utils/extract.go new file mode 100644 index 0000000..24f7eee --- /dev/null +++ b/utils/extract.go @@ -0,0 +1,465 @@ +package utils + +import ( + "fmt" + "io" + "os" + "os/exec" + "runtime" + "strings" + "syscall" + + "github.com/gioco-play/easy-i18n/i18n" +) + +// ExtractBackupStream handles decompression and extraction of backup stream +// compressType: "zstd", "qp", or "" (no compression) +// targetDir: directory to extract files, if empty, just save compressed/uncompressed file +// parallel: number of parallel threads (default: 4) +// cfg: configuration for resolving xtrabackup/xbstream paths +// logCtx: log context for writing logs +// Returns error if extraction fails +func ExtractBackupStream(reader io.Reader, compressType string, targetDir string, outputPath string, parallel int, cfg *Config, logCtx *LogContext) error { + if parallel == 0 { + parallel = 4 + } + + if targetDir == "" { + // No extraction requested, just save the stream + if compressType == "zstd" { + // For zstd, we need to decompress first + return saveZstdDecompressed(reader, outputPath, parallel, logCtx) + } + // For qpress or no compression, save as-is + if logCtx != nil { + logCtx.WriteLog("EXTRACT", "Saving stream to %s", outputPath) + } + file, err := os.Create(outputPath) + if err != nil { + return fmt.Errorf("failed to create output file: %v", err) + } + defer file.Close() + _, err = io.Copy(file, reader) + if err != nil { + if logCtx != nil { + logCtx.WriteLog("EXTRACT", "Failed to save stream: %v", err) + // Check if it's a connection error + errStr := err.Error() + if strings.Contains(strings.ToLower(errStr), "eof") || strings.Contains(strings.ToLower(errStr), "broken pipe") || strings.Contains(strings.ToLower(errStr), "connection") { + logCtx.WriteLog("TCP", "Connection interrupted while saving stream: %v", err) + } + } + return err + } + return nil + } + + // Extraction requested + if compressType == "zstd" { + return extractZstdStream(reader, targetDir, parallel, cfg, logCtx) + } else if compressType == "qp" { + // qpress compression requires saving to file first, then using xtrabackup --decompress + // This is because xbstream doesn't support --decompress in stream mode for MySQL 5.7 + return extractQpressStream(reader, targetDir, outputPath, parallel, cfg, logCtx) + } else { + // No compression, just extract with xbstream + return extractXbstream(reader, targetDir, parallel, cfg, logCtx) + } +} + +// saveZstdDecompressed saves zstd-compressed stream after decompression +func saveZstdDecompressed(reader io.Reader, outputPath string, parallel int, logCtx *LogContext) error { + // Check zstd dependency + if _, err := exec.LookPath("zstd"); err != nil { + return fmt.Errorf("%s", i18n.Sprintf("zstd command not found. 
Please install zstd: https://github.com/facebook/zstd")) + } + + if parallel == 0 { + parallel = 4 + } + + if logCtx != nil { + logCtx.WriteLog("DECOMPRESS", "Decompressing zstd stream to %s", outputPath) + } + + zstdCmd := exec.Command("zstd", "-d", fmt.Sprintf("-T%d", parallel), "-o", outputPath) + zstdCmd.Stdin = reader + if logCtx != nil { + zstdCmd.Stderr = logCtx.GetFile() + zstdCmd.Stdout = logCtx.GetFile() + } else { + zstdCmd.Stderr = os.Stderr + zstdCmd.Stdout = os.Stderr + } + + err := zstdCmd.Run() + if err != nil && logCtx != nil { + logCtx.WriteLog("DECOMPRESS", "zstd decompression failed: %v", err) + } else if logCtx != nil { + logCtx.WriteLog("DECOMPRESS", "zstd decompression completed successfully") + } + return err +} + +// extractZstdStream decompresses zstd stream and extracts with xbstream +func extractZstdStream(reader io.Reader, targetDir string, parallel int, cfg *Config, logCtx *LogContext) error { + // Check dependencies + if _, err := exec.LookPath("zstd"); err != nil { + return fmt.Errorf("%s", i18n.Sprintf("zstd command not found. Please install zstd: https://github.com/facebook/zstd")) + } + + // Resolve xbstream path + _, xbstreamPath, err := ResolveXtrabackupPath(cfg, true) + if err != nil { + return err + } + + if parallel == 0 { + parallel = 4 + } + + // Create extraction directory + if err := os.MkdirAll(targetDir, 0755); err != nil { + return fmt.Errorf("failed to create extraction directory: %v", err) + } + + if logCtx != nil { + logCtx.WriteLog("DECOMPRESS", "Decompressing zstd stream") + logCtx.WriteLog("XBSTREAM", "Extracting to directory: %s", targetDir) + } + + // Pipe: reader -> zstd -d -T -> xbstream -x --parallel= + zstdCmd := exec.Command("zstd", "-d", fmt.Sprintf("-T%d", parallel), "-") + zstdCmd.Stdin = reader + if logCtx != nil { + zstdCmd.Stderr = logCtx.GetFile() + } else { + zstdCmd.Stderr = os.Stderr + } + + xbstreamCmd := exec.Command(xbstreamPath, "-x", fmt.Sprintf("--parallel=%d", parallel), "-C", targetDir) + xbstreamCmd.Stdin, _ = zstdCmd.StdoutPipe() + if logCtx != nil { + xbstreamCmd.Stderr = logCtx.GetFile() + xbstreamCmd.Stdout = logCtx.GetFile() + } else { + xbstreamCmd.Stderr = os.Stderr + xbstreamCmd.Stdout = os.Stderr + } + + if err := zstdCmd.Start(); err != nil { + if logCtx != nil { + logCtx.WriteLog("DECOMPRESS", "Failed to start zstd: %v", err) + } + return fmt.Errorf("failed to start zstd decompression: %v", err) + } + + if err := xbstreamCmd.Start(); err != nil { + zstdCmd.Process.Kill() + if logCtx != nil { + logCtx.WriteLog("XBSTREAM", "Failed to start xbstream: %v", err) + } + return fmt.Errorf("failed to start xbstream extraction: %v", err) + } + + // Wait for both processes + zstdErr := zstdCmd.Wait() + xbstreamErr := xbstreamCmd.Wait() + + // Check if zstd failed due to connection error + if zstdErr != nil { + if logCtx != nil { + logCtx.WriteLog("DECOMPRESS", "zstd decompression failed: %v", zstdErr) + } + // Check if it's a connection error (broken pipe or EOF unexpectedly) + errStr := zstdErr.Error() + if strings.Contains(strings.ToLower(errStr), "broken pipe") || strings.Contains(strings.ToLower(errStr), "eof") || strings.Contains(strings.ToLower(errStr), "connection") { + errMsg := fmt.Sprintf("zstd decompression interrupted: connection closed unexpectedly: %v", zstdErr) + if logCtx != nil { + logCtx.WriteLog("TCP", "Connection interrupted during decompression: %s", errMsg) + } + return fmt.Errorf("%s", errMsg) + } + // Check for signal-based termination (Unix-like systems) + if runtime.GOOS != "windows" { 
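+ // A process killed by a signal has no normal exit code; exec.ExitError.Sys()
+ // exposes the syscall.WaitStatus, whose Signaled()/Signal() tell us whether
+ // zstd died from SIGPIPE (its output pipe closed early) or SIGTERM, so a
+ // dropped connection can be reported separately from a real zstd failure.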
+ if exitError, ok := zstdErr.(*exec.ExitError); ok { + if status, ok := exitError.Sys().(syscall.WaitStatus); ok { + if status.Signaled() && (status.Signal() == syscall.SIGPIPE || status.Signal() == syscall.SIGTERM) { + errMsg := fmt.Sprintf("zstd decompression interrupted: connection closed unexpectedly (signal: %s)", status.Signal()) + if logCtx != nil { + logCtx.WriteLog("TCP", "Connection interrupted during decompression: %s", errMsg) + } + return fmt.Errorf("%s", errMsg) + } + } + } + } + return fmt.Errorf("zstd decompression failed: %v", zstdErr) + } + + // Check if xbstream failed due to connection error + if xbstreamErr != nil { + if logCtx != nil { + logCtx.WriteLog("XBSTREAM", "xbstream extraction failed: %v", xbstreamErr) + } + // Check if it's a connection error (broken pipe or EOF unexpectedly) + errStr := xbstreamErr.Error() + if strings.Contains(strings.ToLower(errStr), "broken pipe") || strings.Contains(strings.ToLower(errStr), "eof") || strings.Contains(strings.ToLower(errStr), "connection") { + errMsg := fmt.Sprintf("xbstream extraction interrupted: connection closed unexpectedly: %v", xbstreamErr) + if logCtx != nil { + logCtx.WriteLog("TCP", "Connection interrupted during extraction: %s", errMsg) + } + return fmt.Errorf("%s", errMsg) + } + // Check for signal-based termination (Unix-like systems) + if runtime.GOOS != "windows" { + if exitError, ok := xbstreamErr.(*exec.ExitError); ok { + if status, ok := exitError.Sys().(syscall.WaitStatus); ok { + if status.Signaled() && (status.Signal() == syscall.SIGPIPE || status.Signal() == syscall.SIGTERM) { + errMsg := fmt.Sprintf("xbstream extraction interrupted: connection closed unexpectedly (signal: %s)", status.Signal()) + if logCtx != nil { + logCtx.WriteLog("TCP", "Connection interrupted during extraction: %s", errMsg) + } + return fmt.Errorf("%s", errMsg) + } + } + } + } + return fmt.Errorf("xbstream extraction failed: %v", xbstreamErr) + } + + if logCtx != nil { + logCtx.WriteLog("DECOMPRESS", "zstd decompression completed successfully") + logCtx.WriteLog("XBSTREAM", "xbstream extraction completed successfully") + } + return nil +} + +// extractQpressStream handles qpress-compressed backup stream +// Note: xbstream doesn't support --decompress in stream mode for MySQL 5.7 +// So we need to save to file first, then extract and decompress +func extractQpressStream(reader io.Reader, targetDir string, outputPath string, parallel int, cfg *Config, logCtx *LogContext) error { + // Resolve xtrabackup and xbstream paths + xtrabackupPath, xbstreamPath, err := ResolveXtrabackupPath(cfg, true) + if err != nil { + return err + } + + if parallel == 0 { + parallel = 4 + } + + if logCtx != nil { + logCtx.WriteLog("EXTRACT", "Extracting qpress-compressed backup") + logCtx.WriteLog("EXTRACT", "Target directory: %s", targetDir) + } + + // Step 1: Save compressed stream to file + if outputPath == "" { + outputPath = "backup_temp.xb" + } + file, err := os.Create(outputPath) + if err != nil { + return fmt.Errorf("failed to create temporary file: %v", err) + } + + _, err = io.Copy(file, reader) + file.Close() + if err != nil { + os.Remove(outputPath) + if logCtx != nil { + logCtx.WriteLog("EXTRACT", "Failed to save compressed stream: %v", err) + // Check if it's a connection error + errStr := err.Error() + if strings.Contains(strings.ToLower(errStr), "eof") || strings.Contains(strings.ToLower(errStr), "broken pipe") || strings.Contains(strings.ToLower(errStr), "connection") { + logCtx.WriteLog("TCP", "Connection interrupted while saving 
compressed stream: %v", err) + } + } + return fmt.Errorf("failed to save compressed stream: %v", err) + } + + if logCtx != nil { + logCtx.WriteLog("EXTRACT", "Saved compressed stream to temporary file: %s", outputPath) + } + + // Step 2: Extract with xbstream + if err := os.MkdirAll(targetDir, 0755); err != nil { + os.Remove(outputPath) + return fmt.Errorf("failed to create extraction directory: %v", err) + } + + extractFile, err := os.Open(outputPath) + if err != nil { + os.Remove(outputPath) + return fmt.Errorf("failed to open compressed file: %v", err) + } + defer extractFile.Close() + + if logCtx != nil { + logCtx.WriteLog("XBSTREAM", "Extracting with xbstream") + } + + xbstreamCmd := exec.Command(xbstreamPath, "-x", fmt.Sprintf("--parallel=%d", parallel), "-C", targetDir) + xbstreamCmd.Stdin = extractFile + if logCtx != nil { + xbstreamCmd.Stderr = logCtx.GetFile() + xbstreamCmd.Stdout = logCtx.GetFile() + } else { + xbstreamCmd.Stderr = os.Stderr + xbstreamCmd.Stdout = os.Stderr + } + + if err := xbstreamCmd.Run(); err != nil { + os.Remove(outputPath) + if logCtx != nil { + logCtx.WriteLog("XBSTREAM", "xbstream extraction failed: %v", err) + } + return fmt.Errorf("xbstream extraction failed: %v", err) + } + + if logCtx != nil { + logCtx.WriteLog("XBSTREAM", "xbstream extraction completed successfully") + logCtx.WriteLog("DECOMPRESS", "Decompressing with xtrabackup --decompress") + } + + // Step 3: Decompress extracted files using xtrabackup --decompress + xtrabackupCmd := exec.Command(xtrabackupPath, "--decompress", fmt.Sprintf("--parallel=%d", parallel), "--target-dir", targetDir) + if logCtx != nil { + xtrabackupCmd.Stderr = logCtx.GetFile() + xtrabackupCmd.Stdout = logCtx.GetFile() + } else { + xtrabackupCmd.Stderr = os.Stderr + xtrabackupCmd.Stdout = os.Stderr + } + + if err := xtrabackupCmd.Run(); err != nil { + os.Remove(outputPath) + if logCtx != nil { + logCtx.WriteLog("DECOMPRESS", "xtrabackup decompression failed: %v", err) + } + return fmt.Errorf("xtrabackup decompression failed: %v", err) + } + + // Clean up temporary file + os.Remove(outputPath) + + if logCtx != nil { + logCtx.WriteLog("DECOMPRESS", "xtrabackup decompression completed successfully") + logCtx.WriteLog("EXTRACT", "Extraction completed successfully") + } + return nil +} + +// extractXbstream extracts uncompressed xbstream backup +func extractXbstream(reader io.Reader, targetDir string, parallel int, cfg *Config, logCtx *LogContext) error { + // Resolve xbstream path + _, xbstreamPath, err := ResolveXtrabackupPath(cfg, true) + if err != nil { + return err + } + + if parallel == 0 { + parallel = 4 + } + + // Create extraction directory + if err := os.MkdirAll(targetDir, 0755); err != nil { + return fmt.Errorf("failed to create extraction directory: %v", err) + } + + if logCtx != nil { + logCtx.WriteLog("XBSTREAM", "Extracting xbstream backup to directory: %s", targetDir) + } + + // Extract with xbstream + xbstreamCmd := exec.Command(xbstreamPath, "-x", fmt.Sprintf("--parallel=%d", parallel), "-C", targetDir) + xbstreamCmd.Stdin = reader + if logCtx != nil { + xbstreamCmd.Stderr = logCtx.GetFile() + xbstreamCmd.Stdout = logCtx.GetFile() + } else { + xbstreamCmd.Stderr = os.Stderr + xbstreamCmd.Stdout = os.Stderr + } + + err = xbstreamCmd.Run() + if err != nil { + if logCtx != nil { + logCtx.WriteLog("XBSTREAM", "xbstream extraction failed: %v", err) + } + // Check if it's a connection error (broken pipe or EOF unexpectedly) + errStr := err.Error() + if strings.Contains(strings.ToLower(errStr), "broken pipe") 
|| strings.Contains(strings.ToLower(errStr), "eof") || strings.Contains(strings.ToLower(errStr), "connection") { + errMsg := fmt.Sprintf("xbstream extraction interrupted: connection closed unexpectedly: %v", err) + if logCtx != nil { + logCtx.WriteLog("TCP", "Connection interrupted during extraction: %s", errMsg) + } + return fmt.Errorf("%s", errMsg) + } + // Check for signal-based termination (Unix-like systems) + if runtime.GOOS != "windows" { + if exitError, ok := err.(*exec.ExitError); ok { + if status, ok := exitError.Sys().(syscall.WaitStatus); ok { + if status.Signaled() && (status.Signal() == syscall.SIGPIPE || status.Signal() == syscall.SIGTERM) { + errMsg := fmt.Sprintf("xbstream extraction interrupted: connection closed unexpectedly (signal: %s)", status.Signal()) + if logCtx != nil { + logCtx.WriteLog("TCP", "Connection interrupted during extraction: %s", errMsg) + } + return fmt.Errorf("%s", errMsg) + } + } + } + } + return fmt.Errorf("xbstream extraction failed: %v", err) + } + if logCtx != nil { + logCtx.WriteLog("XBSTREAM", "xbstream extraction completed successfully") + } + return nil +} + +// ExtractBackupStreamToStdout handles decompression only (for piping to xbstream) +// Returns reader that can be piped to xbstream +func ExtractBackupStreamToStdout(reader io.Reader, compressType string, parallel int, logCtx *LogContext) (io.Reader, *exec.Cmd, error) { + if compressType == "zstd" { + // Check zstd dependency + if _, err := exec.LookPath("zstd"); err != nil { + return nil, nil, fmt.Errorf("%s", i18n.Sprintf("zstd command not found. Please install zstd: https://github.com/facebook/zstd")) + } + + if parallel == 0 { + parallel = 4 + } + + if logCtx != nil { + logCtx.WriteLog("DECOMPRESS", "Decompressing zstd stream to stdout") + } + + // Decompress with zstd + zstdCmd := exec.Command("zstd", "-d", fmt.Sprintf("-T%d", parallel), "-") + zstdCmd.Stdin = reader + if logCtx != nil { + zstdCmd.Stderr = logCtx.GetFile() + } else { + zstdCmd.Stderr = os.Stderr + } + + stdout, err := zstdCmd.StdoutPipe() + if err != nil { + return nil, nil, err + } + + if err := zstdCmd.Start(); err != nil { + if logCtx != nil { + logCtx.WriteLog("DECOMPRESS", "Failed to start zstd: %v", err) + } + return nil, nil, fmt.Errorf("failed to start zstd decompression: %v", err) + } + + return stdout, zstdCmd, nil + } + + // No compression or qpress - return as-is + // Note: qpress cannot be stream-decompressed, so user needs to save file first + return reader, nil, nil +} diff --git a/utils/i18n.go b/utils/i18n.go index e442be1..7e5117a 100644 --- a/utils/i18n.go +++ b/utils/i18n.go @@ -74,6 +74,11 @@ func InitEn() { message.SetString(language.English, "Qwen API Key is required for AI diagnosis. Please set it in config.\n", "Qwen API Key is required for AI diagnosis. Please set it in config.\n") message.SetString(language.English, "AI diagnosis on backup failure: on/off. If not set, prompt interactively.", "AI diagnosis on backup failure: on/off. If not set, prompt interactively.") message.SetString(language.English, "AI_DIAG_PROMPT", "You are a MySQL backup expert. Based on the provided log error information, give concise and clear repair suggestions in English. 
The output should be suitable for display in the command line, avoid using Markdown format, and use a clear text structure.\n\nSample output format:\nERROR: [Error keyword]\nCAUSE: [Brief analysis of the cause]\nFIX: [Specific repair steps]") + message.SetString(language.English, "AI_DIAG_PROMPT_BACKUP", "You are a MySQL backup expert specializing in xtrabackup backup operations. Based on the provided log error information, give concise and clear repair suggestions in English. Focus on xtrabackup backup failures, MySQL connection issues, lock timeouts, and backup-related errors. The output should be suitable for display in the command line, avoid using Markdown format, and use a clear text structure.\n\nSample output format:\nERROR: [Error keyword]\nCAUSE: [Brief analysis of the cause]\nFIX: [Specific repair steps]") + message.SetString(language.English, "AI_DIAG_PROMPT_PREPARE", "You are a MySQL backup expert specializing in xtrabackup prepare operations. Based on the provided log error information, give concise and clear repair suggestions in English. Focus on prepare-specific errors, InnoDB recovery issues, log file corruption, and prepare-related errors. The output should be suitable for display in the command line, avoid using Markdown format, and use a clear text structure.\n\nSample output format:\nERROR: [Error keyword]\nCAUSE: [Brief analysis of the cause]\nFIX: [Specific repair steps]") + message.SetString(language.English, "AI_DIAG_PROMPT_TCP", "You are a network and MySQL backup expert. Based on the provided log error information, give concise and clear repair suggestions in English. Focus on TCP connection issues, network errors, handshake failures, and streaming-related errors. The output should be suitable for display in the command line, avoid using Markdown format, and use a clear text structure.\n\nSample output format:\nERROR: [Error keyword]\nCAUSE: [Brief analysis of the cause]\nFIX: [Specific repair steps]") + message.SetString(language.English, "AI_DIAG_PROMPT_OSS", "You are a cloud storage and MySQL backup expert. Based on the provided log error information, give concise and clear repair suggestions in English. Focus on OSS upload failures, network issues, authentication errors, and upload-related errors. The output should be suitable for display in the command line, avoid using Markdown format, and use a clear text structure.\n\nSample output format:\nERROR: [Error keyword]\nCAUSE: [Brief analysis of the cause]\nFIX: [Specific repair steps]") + message.SetString(language.English, "AI_DIAG_PROMPT_EXTRACT", "You are a MySQL backup expert specializing in backup extraction and decompression. Based on the provided log error information, give concise and clear repair suggestions in English. Focus on xbstream extraction errors, decompression failures (zstd/qpress), file system issues, and extraction-related errors. 
The output should be suitable for display in the command line, avoid using Markdown format, and use a clear text structure.\n\nSample output format:\nERROR: [Error keyword]\nCAUSE: [Brief analysis of the cause]\nFIX: [Specific repair steps]") message.SetString(language.English, "Enable handshake for TCP streaming (default: false, can be set in config)", "Enable handshake for TCP streaming (default: false, can be set in config)") message.SetString(language.English, "Handshake key for TCP streaming (default: empty, can be set in config)", "Handshake key for TCP streaming (default: empty, can be set in config)") message.SetString(language.English, "Path to existing xtrabackup backup file to upload (use '-' for stdin)", "Path to existing xtrabackup backup file to upload (use '-' for stdin)") @@ -87,6 +92,11 @@ func InitEn() { message.SetString(language.English, "Equivalent command: cat %s | nc -l4 %d", "Equivalent command: cat %s | nc -l4 %d") message.SetString(language.English, "Streaming backup data...", "Streaming backup data...") message.SetString(language.English, "Stream completed!", "Stream completed!") + message.SetString(language.English, "Warning: Target directory '%s' already exists and is not empty.", "Warning: Target directory '%s' already exists and is not empty.") + message.SetString(language.English, "Extracting to this directory may overwrite existing files.", "Extracting to this directory may overwrite existing files.") + message.SetString(language.English, "Do you want to continue? (y/n): ", "Do you want to continue? (y/n): ") + message.SetString(language.English, "Extraction cancelled.", "Extraction cancelled.") + message.SetString(language.English, "Proceeding with extraction (existing files may be overwritten)...", "Proceeding with extraction (existing files may be overwritten)...") // Backup file validation messages (English) message.SetString(language.English, "Validating backup file: %s", "Validating backup file: %s") @@ -146,6 +156,11 @@ func InitEn() { message.SetString(language.SimplifiedChinese, "Qwen API Key is required for AI diagnosis. Please set it in config.\n", "AI诊断需要 Qwen API Key,请在配置文件中设置。\n") message.SetString(language.SimplifiedChinese, "AI diagnosis on backup failure: on/off. 
If not set, prompt interactively.", "备份失败时AI诊断:on/off。不设置则交互式询问。")
 message.SetString(language.SimplifiedChinese, "AI_DIAG_PROMPT", "你是MySQL备份专家。请根据提供的日志错误信息,给出简洁、明确的中文修复建议。输出内容应适合在命令行中展示,避免使用Markdown格式,使用清晰的文本结构。\n\n示例输出格式:\n错误: [错误关键词]\n原因: [简要分析原因]\n修复: [具体修复步骤]")
+ message.SetString(language.SimplifiedChinese, "AI_DIAG_PROMPT_BACKUP", "你是MySQL备份专家,专注于xtrabackup备份操作。请根据提供的日志错误信息,给出简洁、明确的中文修复建议。重点关注xtrabackup备份失败、MySQL连接问题、锁超时和备份相关错误。输出内容应适合在命令行中展示,避免使用Markdown格式,使用清晰的文本结构。\n\n示例输出格式:\n错误: [错误关键词]\n原因: [简要分析原因]\n修复: [具体修复步骤]")
+ message.SetString(language.SimplifiedChinese, "AI_DIAG_PROMPT_PREPARE", "你是MySQL备份专家,专注于xtrabackup prepare操作。请根据提供的日志错误信息,给出简洁、明确的中文修复建议。重点关注prepare特定错误、InnoDB恢复问题、日志文件损坏和prepare相关错误。输出内容应适合在命令行中展示,避免使用Markdown格式,使用清晰的文本结构。\n\n示例输出格式:\n错误: [错误关键词]\n原因: [简要分析原因]\n修复: [具体修复步骤]")
+ message.SetString(language.SimplifiedChinese, "AI_DIAG_PROMPT_TCP", "你是网络和MySQL备份专家。请根据提供的日志错误信息,给出简洁、明确的中文修复建议。重点关注TCP连接问题、网络错误、握手失败和流式传输相关错误。输出内容应适合在命令行中展示,避免使用Markdown格式,使用清晰的文本结构。\n\n示例输出格式:\n错误: [错误关键词]\n原因: [简要分析原因]\n修复: [具体修复步骤]")
+ message.SetString(language.SimplifiedChinese, "AI_DIAG_PROMPT_OSS", "你是云存储和MySQL备份专家。请根据提供的日志错误信息,给出简洁、明确的中文修复建议。重点关注OSS上传失败、网络问题、认证错误和上传相关错误。输出内容应适合在命令行中展示,避免使用Markdown格式,使用清晰的文本结构。\n\n示例输出格式:\n错误: [错误关键词]\n原因: [简要分析原因]\n修复: [具体修复步骤]")
+ message.SetString(language.SimplifiedChinese, "AI_DIAG_PROMPT_EXTRACT", "你是MySQL备份专家,专注于备份提取和解压缩。请根据提供的日志错误信息,给出简洁、明确的中文修复建议。重点关注xbstream提取错误、解压缩失败(zstd/qpress)、文件系统问题和提取相关错误。输出内容应适合在命令行中展示,避免使用Markdown格式,使用清晰的文本结构。\n\n示例输出格式:\n错误: [错误关键词]\n原因: [简要分析原因]\n修复: [具体修复步骤]")
 message.SetString(language.SimplifiedChinese, "Enable handshake for TCP streaming (default: false, can be set in config)", "TCP流推送启用握手认证(默认false,可在配置文件设置)")
 message.SetString(language.SimplifiedChinese, "Handshake key for TCP streaming (default: empty, can be set in config)", "TCP流推送握手密钥(默认空,可在配置文件设置)")
 message.SetString(language.SimplifiedChinese, "Path to existing xtrabackup backup file to upload (use '-' for stdin)", "已存在的xtrabackup备份文件路径,用于上传(使用'-'表示从stdin读取)")
@@ -157,8 +172,23 @@
 message.SetString(language.SimplifiedChinese, "OSS upload completed!", "OSS上传完成!")
 message.SetString(language.SimplifiedChinese, "Starting TCP stream server on port %d...", "正在启动端口%d的TCP流服务器...")
 message.SetString(language.SimplifiedChinese, "Equivalent command: cat %s | nc -l4 %d", "等价命令: cat %s | nc -l4 %d")
 message.SetString(language.SimplifiedChinese, "Streaming backup data...", "正在流式传输备份数据...")
 message.SetString(language.SimplifiedChinese, "Stream completed!", "流式传输完成!")
+ message.SetString(language.English, "Clearing target directory...", "Clearing target directory...")
+ message.SetString(language.English, "Target directory cleared successfully", "Target directory cleared successfully")
+ message.SetString(language.English, "Target directory cleared. 
Proceeding with extraction...", "Target directory cleared. Proceeding with extraction...")
+ message.SetString(language.English, "Auto-confirming overwrite (--yes/-y flag is set)...", "Auto-confirming overwrite (--yes/-y flag is set)...")
+ message.SetString(language.English, "Auto-confirming AI diagnosis (--yes/-y flag is set)...", "Auto-confirming AI diagnosis (--yes/-y flag is set)...")
+ message.SetString(language.SimplifiedChinese, "Warning: Target directory '%s' already exists and is not empty.", "警告:目标目录 '%s' 已存在且不为空。")
+ message.SetString(language.SimplifiedChinese, "Extracting to this directory may overwrite existing files.", "提取到此目录可能会覆盖现有文件。")
+ message.SetString(language.SimplifiedChinese, "Do you want to continue? (y/n): ", "是否要继续?(y/n): ")
+ message.SetString(language.SimplifiedChinese, "Extraction cancelled.", "提取已取消。")
+ message.SetString(language.SimplifiedChinese, "Proceeding with extraction (existing files may be overwritten)...", "继续提取(现有文件可能被覆盖)...")
+ message.SetString(language.SimplifiedChinese, "Clearing target directory...", "正在清空目标目录...")
+ message.SetString(language.SimplifiedChinese, "Target directory cleared successfully", "目标目录已清空")
+ message.SetString(language.SimplifiedChinese, "Target directory cleared. Proceeding with extraction...", "目标目录已清空,继续提取...")
+ message.SetString(language.SimplifiedChinese, "Auto-confirming overwrite (--yes/-y flag is set)...", "自动确认覆盖(--yes/-y 标志已设置)...")
+ message.SetString(language.SimplifiedChinese, "Auto-confirming AI diagnosis (--yes/-y flag is set)...", "自动确认AI诊断(--yes/-y 标志已设置)...")
 
 // Backup file validation messages (Chinese)
 message.SetString(language.SimplifiedChinese, "Validating backup file: %s", "正在校验备份文件: %s")
diff --git a/utils/log_context.go b/utils/log_context.go
new file mode 100644
index 0000000..5b1ab15
--- /dev/null
+++ b/utils/log_context.go
@@ -0,0 +1,244 @@
+package utils
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+)
+
+// LogContext manages unified log file for all operations
+type LogContext struct {
+ logFile *os.File
+ logFileName string
+ logDir string
+ completedOK bool // Flag to mark if operation completed successfully
+}
+
+// NewLogContext creates a new log context with backup-helper-{timestamp}.log
+// If logFileName is provided, it will be used instead of the auto-generated name.
+// logFileName can be:
+// - Empty string: auto-generate backup-helper-{timestamp}.log
+// - Relative path: will be joined with logDir
+// - Absolute path: will be used as-is (logDir will be ignored for this file)
+//
+// If logFileName is an absolute path and logDir is also specified, logDir will be ignored
+// and a warning will be printed to stderr
+func NewLogContext(logDir string, logFileName string) (*LogContext, error) {
+ var finalLogFileName string
+ originalLogDir := logDir // Store original for conflict detection
+
+ if logFileName != "" {
+ // Custom log file name provided
+ if filepath.IsAbs(logFileName) {
+ // Absolute path: use as-is, extract dir for cleanup
+ finalLogFileName = logFileName
+ // Check if logDir was also specified and differs from the logFileName's directory
+ logFileNameDir := filepath.Dir(logFileName)
+ if originalLogDir != "" && originalLogDir != logFileNameDir {
+ // Conflict detected: logDir is specified but logFileName is absolute path
+ // logDir will be ignored, use logFileName's directory instead
+ fmt.Fprintf(os.Stderr, "[WARNING] logDir (%s) is specified but will be ignored because logFileName is an absolute path (%s). 
Using directory from logFileName: %s\n", + originalLogDir, logFileName, logFileNameDir) + } + logDir = logFileNameDir + } else { + // Relative path: join with logDir + if err := ensureLogsDir(logDir); err != nil { + return nil, err + } + finalLogFileName = filepath.Join(logDir, logFileName) + } + // Ensure directory exists for custom file + if err := ensureLogsDir(filepath.Dir(finalLogFileName)); err != nil { + return nil, err + } + } else { + // Auto-generate log file name + if err := ensureLogsDir(logDir); err != nil { + return nil, err + } + cleanOldLogs(logDir, 10) + timestamp := time.Now().Format("20060102150405") + finalLogFileName = filepath.Join(logDir, fmt.Sprintf("backup-helper-%s.log", timestamp)) + } + + logFile, err := os.Create(finalLogFileName) + if err != nil { + return nil, fmt.Errorf("failed to create log file: %v", err) + } + + ctx := &LogContext{ + logFile: logFile, + logFileName: finalLogFileName, + logDir: logDir, + completedOK: false, // Default to false, will be set to true on successful completion + } + + // Write initial header + timestampFormatted := time.Now().Format("2006-01-02 15:04:05") + ctx.WriteLog("SYSTEM", "=== MySQL Backup Helper Log Started ===") + ctx.WriteLog("SYSTEM", "Timestamp: %s", timestampFormatted) + + return ctx, nil +} + +// WriteLog writes a log entry with [MODULE] prefix and timestamp +func (lc *LogContext) WriteLog(module string, format string, args ...interface{}) { + if lc.logFile == nil { + return + } + timestamp := time.Now().Format("2006-01-02 15:04:05") + message := fmt.Sprintf(format, args...) + logEntry := fmt.Sprintf("[%s] [%s] %s\n", timestamp, module, message) + lc.logFile.WriteString(logEntry) + lc.logFile.Sync() +} + +// WriteCommandOutput writes command stderr/stdout to log +func (lc *LogContext) WriteCommandOutput(module string, data []byte) { + if lc.logFile == nil || len(data) == 0 { + return + } + timestamp := time.Now().Format("2006-01-02 15:04:05") + scanner := bufio.NewScanner(strings.NewReader(string(data))) + for scanner.Scan() { + line := scanner.Text() + logEntry := fmt.Sprintf("[%s] [%s] %s\n", timestamp, module, line) + lc.logFile.WriteString(logEntry) + } + lc.logFile.Sync() +} + +// GetFile returns the underlying file for direct writing (e.g., redirecting command output) +func (lc *LogContext) GetFile() *os.File { + return lc.logFile +} + +// GetFileName returns the log file path +func (lc *LogContext) GetFileName() string { + return lc.logFileName +} + +// MarkSuccess marks the operation as successfully completed +// This will cause "completed OK!" to be written to the log before "Log Ended" +func (lc *LogContext) MarkSuccess() { + lc.completedOK = true +} + +// Close closes the log file +// If MarkSuccess() was called, it will write "completed OK!" before "Log Ended" +func (lc *LogContext) Close() { + if lc.logFile != nil { + timestamp := time.Now().Format("2006-01-02 15:04:05") + if lc.completedOK { + lc.logFile.WriteString(fmt.Sprintf("[%s] [SYSTEM] completed OK!\n", timestamp)) + } + lc.logFile.WriteString(fmt.Sprintf("[%s] [SYSTEM] === MySQL Backup Helper Log Ended ===\n", timestamp)) + lc.logFile.Close() + lc.logFile = nil + } +} + +// ExtractErrorSummary extracts error summary from log content based on module type +func ExtractErrorSummary(module string, logContent string) string { + if logContent == "" { + return "" + } + + lines := strings.Split(logContent, "\n") + errorLines := []string{} + + switch module { + case "BACKUP": + // Check if "completed OK!" 
exists + if !strings.Contains(logContent, "completed OK!") { + // Extract last 20 lines containing "error" or "failed" + for i := len(lines) - 1; i >= 0 && len(errorLines) < 20; i-- { + line := strings.ToLower(lines[i]) + if strings.Contains(line, "error") || strings.Contains(line, "failed") || + strings.Contains(line, "fatal") || strings.Contains(line, "critical") { + errorLines = append([]string{lines[i]}, errorLines...) + } + } + // If no error lines found, get last 20 lines + if len(errorLines) == 0 { + start := len(lines) - 20 + if start < 0 { + start = 0 + } + errorLines = lines[start:] + } + } + + case "PREPARE": + // Extract last 20 lines containing "error" or "failed" + for i := len(lines) - 1; i >= 0 && len(errorLines) < 20; i-- { + line := strings.ToLower(lines[i]) + if strings.Contains(line, "error") || strings.Contains(line, "failed") || + strings.Contains(line, "fatal") || strings.Contains(line, "critical") { + errorLines = append([]string{lines[i]}, errorLines...) + } + } + // If no error lines found, get last 20 lines + if len(errorLines) == 0 { + start := len(lines) - 20 + if start < 0 { + start = 0 + } + errorLines = lines[start:] + } + + case "TCP", "OSS": + // Extract connection/network errors + for i := len(lines) - 1; i >= 0 && len(errorLines) < 20; i-- { + line := strings.ToLower(lines[i]) + if strings.Contains(line, "error") || strings.Contains(line, "failed") || + strings.Contains(line, "timeout") || strings.Contains(line, "connection") || + strings.Contains(line, "refused") { + errorLines = append([]string{lines[i]}, errorLines...) + } + } + if len(errorLines) == 0 { + start := len(lines) - 20 + if start < 0 { + start = 0 + } + errorLines = lines[start:] + } + + case "DECOMPRESS", "EXTRACT", "XBSTREAM": + // Extract command output errors + for i := len(lines) - 1; i >= 0 && len(errorLines) < 20; i-- { + line := strings.ToLower(lines[i]) + if strings.Contains(line, "error") || strings.Contains(line, "failed") || + strings.Contains(line, "fatal") || strings.Contains(line, "cannot") || + strings.Contains(line, "unable") { + errorLines = append([]string{lines[i]}, errorLines...) 
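+ // Scanning backwards from the tail keeps only the ~20 most recent matches;
+ // prepending each hit (rather than appending) preserves original log order.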
+ } + } + if len(errorLines) == 0 { + start := len(lines) - 20 + if start < 0 { + start = 0 + } + errorLines = lines[start:] + } + + default: + // Default: get last 20 lines + start := len(lines) - 20 + if start < 0 { + start = 0 + } + errorLines = lines[start:] + } + + if len(errorLines) == 0 { + return "" + } + + return strings.Join(errorLines, "\n") +} diff --git a/utils/mysql.go b/utils/mysql.go index 1a0faf3..652e293 100644 --- a/utils/mysql.go +++ b/utils/mysql.go @@ -24,15 +24,37 @@ func GetConnection(host string, port int, user string, password string) *sql.DB return db } +// GetMySQLVariable gets a MySQL variable value +// Returns empty string if query fails or variable not found +// This function does not exit on error to allow graceful degradation func GetMySQLVariable(db *sql.DB, name string) string { sql := fmt.Sprintf("select @@%s", name) results, err := db.Query(sql) - check(err) - for results.Next() { + if err != nil { + // Log error but don't exit - allow graceful degradation + // This is important when user doesn't have permission to query certain variables + // or when connecting to remote MySQL that may not expose all variables + return "" + } + defer results.Close() + + if results.Next() { var val string - err = results.Scan(&val) - check(err) + if err := results.Scan(&val); err != nil { + return "" + } return val } return "" } + +// GetMySQLConfigFile is deprecated and should not be used. +// We no longer auto-detect MySQL config files to avoid using wrong config file +// (e.g., from another MySQL instance on the same host). +// Users must explicitly specify --defaults-file if they want to use a config file. +// This function is kept for backward compatibility but always returns empty string. +func GetMySQLConfigFile(db *sql.DB) string { + // Always return empty - no auto-detection + // Users must explicitly specify --defaults-file + return "" +} diff --git a/utils/oss_client.go b/utils/oss_client.go index a76e589..cdb7582 100644 --- a/utils/oss_client.go +++ b/utils/oss_client.go @@ -27,27 +27,45 @@ func (listener *OssProgressListener) ProgressChanged(event *oss.ProgressEvent) { } // UploadReaderToOSS supports fragmenting upload from io.Reader to OSS, objectName is passed by the caller -func UploadReaderToOSS(cfg *Config, objectName string, reader io.Reader, totalSize int64) error { +func UploadReaderToOSS(cfg *Config, objectName string, reader io.Reader, totalSize int64, isCompressed bool, logCtx *LogContext) error { var waitSender sync.WaitGroup // Create progress tracker - tracker := NewProgressTracker(totalSize) + tracker := NewProgressTrackerWithCompression(totalSize, isCompressed) defer tracker.Complete() + if logCtx != nil { + logCtx.WriteLog("OSS", "Starting OSS upload") + logCtx.WriteLog("OSS", "Object name: %s", objectName) + logCtx.WriteLog("OSS", "Total size: %d bytes", totalSize) + } + client, err := oss.New(cfg.Endpoint, cfg.AccessKeyId, cfg.AccessKeySecret) if err != nil { + if logCtx != nil { + logCtx.WriteLog("OSS", "Failed to create OSS client: %v", err) + } return err } bucket, err := client.Bucket(cfg.BucketName) if err != nil { + if logCtx != nil { + logCtx.WriteLog("OSS", "Failed to get bucket: %v", err) + } return err } storageType := oss.ObjectStorageClass(oss.StorageStandard) imur, err := bucket.InitiateMultipartUpload(objectName, storageType) if err != nil { + if logCtx != nil { + logCtx.WriteLog("OSS", "Failed to initiate multipart upload: %v", err) + } return err } + if logCtx != nil { + logCtx.WriteLog("OSS", "Multipart upload initiated, 
upload ID: %s", imur.UploadID) + } bufferSize := cfg.Size if bufferSize == 0 { @@ -59,20 +77,27 @@ func UploadReaderToOSS(cfg *Config, objectName string, reader io.Reader, totalSi var parts []oss.UploadPart index := 1 + traffic := cfg.GetRateLimit() // Get actual rate limit value for { p := make([]byte, bufferSize) n, err := io.ReadFull(bufReader, p) if n > 0 { data := p[:n] waitSender.Add(1) - part, err := uploadPart(bucket, imur, data, index, cfg.Traffic) + part, err := uploadPart(bucket, imur, data, index, traffic) if err != nil { + if logCtx != nil { + logCtx.WriteLog("OSS", "Failed to upload part %d: %v", index, err) + } bucket.AbortMultipartUpload(imur) return err } parts = append(parts, part) index++ waitSender.Done() + if logCtx != nil && index%10 == 0 { + logCtx.WriteLog("OSS", "Uploaded %d parts", index-1) + } } if err == io.EOF || err == io.ErrUnexpectedEOF { break @@ -86,8 +111,15 @@ func UploadReaderToOSS(cfg *Config, objectName string, reader io.Reader, totalSi objectAcl := oss.ObjectACL(oss.ACLPrivate) _, err = bucket.CompleteMultipartUpload(imur, parts, objectAcl) if err != nil { + if logCtx != nil { + logCtx.WriteLog("OSS", "Failed to complete multipart upload: %v", err) + } return err } + if logCtx != nil { + logCtx.WriteLog("OSS", "OSS upload completed successfully") + logCtx.WriteLog("OSS", "Total parts uploaded: %d", len(parts)) + } return nil } diff --git a/utils/path_resolver.go b/utils/path_resolver.go new file mode 100644 index 0000000..94761d3 --- /dev/null +++ b/utils/path_resolver.go @@ -0,0 +1,136 @@ +package utils + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" +) + +// checkBinaryExecutable checks if a binary file exists and is executable +// It uses multiple fallback methods for compatibility: +// 1. Check file exists and has execute permission +// 2. Try to run with --version (for tools that support it) +// 3. Try to run with -h or --help (for tools that support it) +// 4. 
Try to run without arguments (will show usage, exit code 1 is OK) +func checkBinaryExecutable(binaryPath string, binaryName string) error { + // Check if file exists + info, err := os.Stat(binaryPath) + if err != nil { + return fmt.Errorf("%s binary not found at %s: %v", binaryName, binaryPath, err) + } + + // Check if it's a regular file (not a directory) + if info.IsDir() { + return fmt.Errorf("%s path is a directory, not a file: %s", binaryName, binaryPath) + } + + // Check execute permission + mode := info.Mode() + if mode.Perm()&0111 == 0 { + return fmt.Errorf("%s at %s does not have execute permission", binaryName, binaryPath) + } + + // Try multiple methods to verify it's actually executable + // Method 1: Try --version (most common, but not all tools support it) + cmd := exec.Command(binaryPath, "--version") + if err := cmd.Run(); err == nil { + return nil // Success + } + + // Method 2: Try -h (help, very common) + cmd = exec.Command(binaryPath, "-h") + if err := cmd.Run(); err == nil { + return nil // Success + } + + // Method 3: Try --help + cmd = exec.Command(binaryPath, "--help") + if err := cmd.Run(); err == nil { + return nil // Success + } + + // Method 4: Try running without arguments + // Most tools will show usage and exit with code 1, which is acceptable + // We consider it executable if the process can start (even if it exits with error) + cmd = exec.Command(binaryPath) + err = cmd.Run() + if err != nil { + // Check if it's an exit error (exit code != 0) + // This is OK - it means the binary ran but exited with an error + // (e.g., missing arguments, which is expected) + if exitErr, ok := err.(*exec.ExitError); ok { + // Exit code 1 or 2 usually means "usage" or "missing arguments" + // This is acceptable - it proves the binary is executable + if exitErr.ExitCode() == 1 || exitErr.ExitCode() == 2 { + return nil // Success - binary is executable + } + } + // Other errors (like "permission denied", "no such file") are real problems + return fmt.Errorf("%s at %s appears to exist but cannot be executed: %v", binaryName, binaryPath, err) + } + + // If we get here, the binary ran successfully (unlikely without args, but possible) + return nil +} + +// ResolveXtrabackupPath resolves the paths to xtrabackup and xbstream binaries. +// Priority: cfg.XtrabackupPath (from flag or config) > XTRABACKUP_PATH env var > PATH lookup +// If a directory path is provided, xbstream will be searched in the same directory. +// requireXbstream: if false, skip xbstream validation (e.g., for prepare mode) +// Returns: (xtrabackupPath, xbstreamPath, error) +func ResolveXtrabackupPath(cfg *Config, requireXbstream bool) (string, string, error) { + var basePath string + + // Priority 1: Command-line flag or config file + if cfg.XtrabackupPath != "" { + basePath = cfg.XtrabackupPath + } else { + // Priority 2: Environment variable + if envPath := os.Getenv("XTRABACKUP_PATH"); envPath != "" { + basePath = envPath + } else { + // Priority 3: PATH lookup + path, err := exec.LookPath("xtrabackup") + if err != nil { + return "", "", fmt.Errorf("xtrabackup not found in PATH. 
Please install Percona XtraBackup or specify path using --xtrabackup-path flag or XTRABACKUP_PATH environment variable") + } + basePath = path + } + } + + // Determine if basePath is a directory or file + info, err := os.Stat(basePath) + if err != nil { + return "", "", fmt.Errorf("xtrabackup path not found: %s: %v", basePath, err) + } + + var xtrabackupPath, xbstreamPath string + var xbstreamDir string + + if info.IsDir() { + // Directory: append /xtrabackup and /xbstream + xtrabackupPath = filepath.Join(basePath, "xtrabackup") + xbstreamPath = filepath.Join(basePath, "xbstream") + xbstreamDir = basePath + } else { + // File: use as-is for xtrabackup, find xbstream in same directory + xtrabackupPath = basePath + xbstreamDir = filepath.Dir(basePath) + xbstreamPath = filepath.Join(xbstreamDir, "xbstream") + } + + // Validate xtrabackup path exists and is executable + if err := checkBinaryExecutable(xtrabackupPath, "xtrabackup"); err != nil { + return "", "", err + } + + // Validate xbstream only if required + if requireXbstream { + if err := checkBinaryExecutable(xbstreamPath, "xbstream"); err != nil { + return "", "", fmt.Errorf("xbstream binary not found at %s (expected in same directory as xtrabackup): %v", xbstreamPath, err) + } + } + + return xtrabackupPath, xbstreamPath, nil +} diff --git a/utils/precheck.go b/utils/precheck.go new file mode 100644 index 0000000..d83ac7d --- /dev/null +++ b/utils/precheck.go @@ -0,0 +1,1000 @@ +package utils + +import ( + "database/sql" + "fmt" + "net" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + "time" + + "github.com/fatih/color" + "github.com/gioco-play/easy-i18n/i18n" +) + +// CheckResult represents the result of a single check +type CheckResult struct { + Status string // "OK", "WARNING", "ERROR", "INFO", "RECOMMEND" + Item string + Value string + Recommended string + Message string +} + +// SystemResources contains system resource information +type SystemResources struct { + CPUCores int + TotalMemory int64 // bytes + AvailableMemory int64 // bytes + NetworkInfo string +} + +// CheckDependencies checks all required and optional tools +func CheckDependencies(cfg *Config, compressType string) []CheckResult { + var results []CheckResult + + // Check xtrabackup (required) + xtrabackupPath, xbstreamPath, err := ResolveXtrabackupPath(cfg, true) + if err != nil { + results = append(results, CheckResult{ + Status: "ERROR", + Item: "xtrabackup", + Value: "not found", + Message: fmt.Sprintf("xtrabackup not found. 
%s", err.Error()), + }) + // If xtrabackup not found, xbstream check will also fail, but we continue + } else { + // Get xtrabackup version + cmd := exec.Command(xtrabackupPath, "--version") + out, err := cmd.CombinedOutput() + versionStr := "unknown" + if err == nil { + // Extract version from output + output := string(out) + lines := strings.Split(output, "\n") + for _, line := range lines { + if strings.Contains(line, "version") || strings.Contains(line, "Version") { + versionStr = strings.TrimSpace(line) + break + } + } + } + results = append(results, CheckResult{ + Status: "OK", + Item: "xtrabackup", + Value: fmt.Sprintf("found at %s (%s)", xtrabackupPath, versionStr), + Message: "", + }) + + // Check xbstream (required for backup) + if xbstreamPath != "" { + results = append(results, CheckResult{ + Status: "OK", + Item: "xbstream", + Value: fmt.Sprintf("found at %s", xbstreamPath), + Message: "", + }) + } + } + + // Check zstd (optional, if compressType includes zstd) + if compressType == "zstd" { + zstdPath, err := exec.LookPath("zstd") + if err != nil { + results = append(results, CheckResult{ + Status: "WARNING", + Item: "zstd", + Value: "not found", + Message: "zstd not found in PATH, install from https://github.com/facebook/zstd", + }) + } else { + results = append(results, CheckResult{ + Status: "OK", + Item: "zstd", + Value: fmt.Sprintf("found at %s", zstdPath), + Message: "", + }) + } + } + + // Check qpress (optional, if compressType includes qp) + if compressType == "qp" { + qpressPath, err := exec.LookPath("qpress") + if err != nil { + results = append(results, CheckResult{ + Status: "WARNING", + Item: "qpress", + Value: "not found", + Message: "qpress not found in PATH, install from https://github.com/mariadb-corporation/qpress", + }) + } else { + results = append(results, CheckResult{ + Status: "OK", + Item: "qpress", + Value: fmt.Sprintf("found at %s", qpressPath), + Message: "", + }) + } + } + + return results +} + +// ValidateDefaultsFile validates if the defaults-file is correct +// It checks if the file exists, is readable, contains MySQL sections, and +// if a database connection is available, verifies that the datadir in the +// config file matches the actual datadir used by MySQL server. +func ValidateDefaultsFile(defaultsFile string, db *sql.DB) CheckResult { + if defaultsFile == "" { + return CheckResult{ + Status: "WARNING", + Item: "defaults-file", + Value: "not found", + Message: "Could not auto-detect MySQL config file. You may need to specify it manually using --defaults-file.", + } + } + + // Check if file exists and is readable + if info, err := os.Stat(defaultsFile); err != nil || info.IsDir() { + return CheckResult{ + Status: "ERROR", + Item: "defaults-file", + Value: defaultsFile, + Message: fmt.Sprintf("File does not exist or is not readable: %v", err), + } + } + + // Try to validate by checking if the file contains MySQL configuration sections + content, err := os.ReadFile(defaultsFile) + if err != nil { + return CheckResult{ + Status: "WARNING", + Item: "defaults-file", + Value: defaultsFile, + Message: fmt.Sprintf("Could not read file for validation: %v", err), + } + } + + contentStr := string(content) + hasMySQLSection := strings.Contains(contentStr, "[mysqld]") || + strings.Contains(contentStr, "[mysql]") || + strings.Contains(contentStr, "[client]") + + if !hasMySQLSection { + return CheckResult{ + Status: "WARNING", + Item: "defaults-file", + Value: defaultsFile, + Message: "File does not appear to contain MySQL configuration sections. 
Please verify this is the correct config file.", + } + } + + // If we have a database connection, try to verify by checking if datadir matches + if db != nil { + actualDatadir := GetMySQLVariable(db, "datadir") + if actualDatadir != "" { + // Parse config file to find datadir setting + configDatadir := parseDatadirFromConfig(contentStr) + if configDatadir != "" { + // Normalize paths for comparison + actualDatadirNorm := filepath.Clean(actualDatadir) + configDatadirNorm := filepath.Clean(configDatadir) + + if actualDatadirNorm != configDatadirNorm { + return CheckResult{ + Status: "ERROR", + Item: "defaults-file", + Value: defaultsFile, + Message: fmt.Sprintf("CRITICAL: Config file datadir (%s) does not match MySQL server datadir (%s). This config file is likely NOT the one MySQL is using. Please specify the correct --defaults-file or remove it to let xtrabackup use default behavior.", configDatadir, actualDatadir), + } + } + } + } + } + + return CheckResult{ + Status: "OK", + Item: "defaults-file", + Value: defaultsFile, + Message: "Config file found and appears valid", + } +} + +// parseDatadirFromConfig extracts datadir value from MySQL config file content +func parseDatadirFromConfig(content string) string { + lines := strings.Split(content, "\n") + inMysqldSection := false + + for _, line := range lines { + line = strings.TrimSpace(line) + + // Skip comments and empty lines + if line == "" || strings.HasPrefix(line, "#") || strings.HasPrefix(line, ";") { + continue + } + + // Check for section headers + if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") { + section := strings.Trim(line, "[]") + inMysqldSection = (section == "mysqld" || section == "mysqld_safe") + continue + } + + // Look for datadir in [mysqld] or [mysqld_safe] section + if inMysqldSection && strings.HasPrefix(strings.ToLower(line), "datadir") { + parts := strings.SplitN(line, "=", 2) + if len(parts) == 2 { + datadir := strings.TrimSpace(parts[1]) + // Remove quotes if present + datadir = strings.Trim(datadir, "\"'") + return datadir + } + } + } + + return "" +} + +// CheckMySQLCompatibility performs MySQL compatibility checks +func CheckMySQLCompatibility(db *sql.DB, cfg *Config) []CheckResult { + var results []CheckResult + + if db == nil { + return results + } + + // Collect MySQL variables (silent mode to avoid duplicate output) + options := CollectVariableFromMySQLServerSilent(db, true) + + // Check MySQL version + if version, ok := options["version"]; ok && version != "" { + // Parse version manually (same logic as checker.go) + header := strings.Split(version, "-")[0] + vers := strings.Split(header, ".") + var v Version + if len(vers) == 3 { + major, _ := strconv.Atoi(vers[0]) + minor, _ := strconv.Atoi(vers[1]) + micro, _ := strconv.Atoi(vers[2]) + v = Version{major, minor, micro} + } + cfg.MysqlVersion = v + status := "OK" + message := "" + if v.major == 5 && v.minor == 7 { + message = "MySQL 5.7" + } else if v.major == 8 && v.minor == 0 && v.micro <= 36 { + message = "MySQL 8.0" + } else { + status = "WARNING" + message = "Version may be newer than supported versions" + } + results = append(results, CheckResult{ + Status: status, + Item: "MySQL version", + Value: version, + Message: message, + }) + } + + // Check xtrabackup version compatibility + mysqlVer := cfg.MysqlVersion + xtrabackupPath, _, err := ResolveXtrabackupPath(cfg, false) + if err == nil { + cmd := exec.Command(xtrabackupPath, "--version") + out, err := cmd.CombinedOutput() + if err == nil { + versionStr := string(out) + re := 
regexp.MustCompile(`([0-9]+)\.([0-9]+)\.([0-9]+)(?:-([0-9]+))?`) + match := re.FindStringSubmatch(versionStr) + if len(match) >= 4 { + var xtrabackupVerParts [4]int + xtrabackupVerParts[0], _ = strconv.Atoi(match[1]) + xtrabackupVerParts[1], _ = strconv.Atoi(match[2]) + xtrabackupVerParts[2], _ = strconv.Atoi(match[3]) + if len(match) >= 5 && match[4] != "" { + xtrabackupVerParts[3], _ = strconv.Atoi(match[4]) + } + + status := "OK" + message := "" + if mysqlVer.major == 5 && mysqlVer.minor == 7 { + if xtrabackupVerParts[0] == 2 && xtrabackupVerParts[1] == 4 { + message = "MySQL 5.7 with xtrabackup 2.4, compatible" + } else { + status = "WARNING" + message = fmt.Sprintf("MySQL 5.7 recommends xtrabackup 2.4, but detected %d.%d", xtrabackupVerParts[0], xtrabackupVerParts[1]) + } + } else if mysqlVer.major == 8 && mysqlVer.minor == 0 { + if xtrabackupVerParts[0] == 8 && xtrabackupVerParts[1] == 0 { + message = "MySQL 8.0 with xtrabackup 8.0, compatible" + if XtrabackupVersionGreaterOrEqual(xtrabackupVerParts, [4]int{8, 0, 34, 29}) { + message += " (Note: xtrabackup 8.0.34-29+, default zstd compression may cause recovery issues)" + } + } else { + status = "WARNING" + message = fmt.Sprintf("MySQL 8.0 recommends xtrabackup 8.0, but detected %d.%d", xtrabackupVerParts[0], xtrabackupVerParts[1]) + } + } + + results = append(results, CheckResult{ + Status: status, + Item: "xtrabackup compatibility", + Value: fmt.Sprintf("%d.%d.%d", xtrabackupVerParts[0], xtrabackupVerParts[1], xtrabackupVerParts[2]), + Message: message, + }) + } + } + } + + // Calculate data size + datadir, err := GetDatadirFromMySQL(db) + if err == nil { + totalSize, err := CalculateBackupSize(datadir) + if err == nil { + results = append(results, CheckResult{ + Status: "OK", + Item: "Estimated backup size", + Value: formatBytesForCheck(totalSize), + Message: fmt.Sprintf("Based on datadir: %s", datadir), + }) + } + } + + // Check replication parameters + replicationItems := []string{"server_id", "log_bin", "gtid_mode", "enforce_gtid_consistency"} + var repValues []string + for _, item := range replicationItems { + if val, ok := options[item]; ok && val != "" { + repValues = append(repValues, fmt.Sprintf("%s=%s", item, val)) + } + } + if len(repValues) > 0 { + results = append(results, CheckResult{ + Status: "OK", + Item: "Replication parameters", + Value: strings.Join(repValues, ", "), + Message: "", + }) + } + + // Validate defaults-file (only if explicitly set in config) + // We do NOT auto-detect to avoid using wrong config file (e.g., from another MySQL instance) + // User should explicitly specify --defaults-file if they want to use it + if cfg.DefaultsFile != "" { + result := ValidateDefaultsFile(cfg.DefaultsFile, db) + results = append(results, result) + } + + return results +} + +// CheckForBackupMode performs checks specific to backup mode +func CheckForBackupMode(cfg *Config, compressType string, db *sql.DB, streamHost string, streamPort int) []CheckResult { + var results []CheckResult + + // Check dependencies (xtrabackup, xbstream, compression tools) + depResults := CheckDependencies(cfg, compressType) + results = append(results, depResults...) + + // Check MySQL connection and compatibility (required for backup) + if db == nil { + results = append(results, CheckResult{ + Status: "ERROR", + Item: "MySQL connection", + Value: "not available", + Message: "MySQL connection is required for backup mode. 
Please provide --host, --port, --user, and --password.", + }) + } else { + // Check MySQL compatibility + mysqlResults := CheckMySQLCompatibility(db, cfg) + results = append(results, mysqlResults...) + } + + // Check TCP connectivity if stream-port or stream-host+stream-port is specified + if streamPort > 0 { + if streamHost != "" { + // Check remote connectivity + connectivityResults := CheckTCPConnectivity(streamHost, streamPort) + results = append(results, connectivityResults...) + } else { + // Check local port listenability + // Use default timeout for check mode (60s) + listenResults := CheckTCPPortListenability(streamPort, 60) + results = append(results, listenResults...) + } + } + + return results +} + +// CheckForDownloadMode performs checks specific to download mode +func CheckForDownloadMode(cfg *Config, compressType string, targetDir string, streamHost string, streamPort int) []CheckResult { + var results []CheckResult + + // Check compression/extraction dependencies if needed + if compressType != "" { + if targetDir != "" { + // Extraction mode: check extraction dependencies + xtrabackupPath, xbstreamPath, err := ResolveXtrabackupPath(cfg, true) + if err != nil { + results = append(results, CheckResult{ + Status: "ERROR", + Item: "xtrabackup/xbstream", + Value: "not found", + Message: fmt.Sprintf("Extraction requires xtrabackup/xbstream: %v", err), + }) + } else { + results = append(results, CheckResult{ + Status: "OK", + Item: "xtrabackup/xbstream", + Value: fmt.Sprintf("found at %s, %s", xtrabackupPath, xbstreamPath), + Message: "", + }) + } + + // Check compression tool + if compressType == "zstd" { + zstdPath, err := exec.LookPath("zstd") + if err != nil { + results = append(results, CheckResult{ + Status: "ERROR", + Item: "zstd", + Value: "not found", + Message: "zstd is required for decompression. Install from https://github.com/facebook/zstd", + }) + } else { + results = append(results, CheckResult{ + Status: "OK", + Item: "zstd", + Value: fmt.Sprintf("found at %s", zstdPath), + Message: "", + }) + } + } else if compressType == "qp" { + qpressPath, err := exec.LookPath("qpress") + if err != nil { + results = append(results, CheckResult{ + Status: "ERROR", + Item: "qpress", + Value: "not found", + Message: "qpress is required for decompression. 
Install from https://github.com/mariadb-corporation/qpress", + }) + } else { + results = append(results, CheckResult{ + Status: "OK", + Item: "qpress", + Value: fmt.Sprintf("found at %s", qpressPath), + Message: "", + }) + } + } + } + } else if targetDir != "" { + // No compression but extraction requested: check xbstream + _, xbstreamPath, err := ResolveXtrabackupPath(cfg, true) + if err != nil { + results = append(results, CheckResult{ + Status: "ERROR", + Item: "xbstream", + Value: "not found", + Message: fmt.Sprintf("Extraction requires xbstream: %v", err), + }) + } else { + results = append(results, CheckResult{ + Status: "OK", + Item: "xbstream", + Value: fmt.Sprintf("found at %s", xbstreamPath), + Message: "", + }) + } + } + + // Check target directory if specified + if targetDir != "" { + if info, err := os.Stat(targetDir); err == nil { + if !info.IsDir() { + results = append(results, CheckResult{ + Status: "ERROR", + Item: "target-dir", + Value: targetDir, + Message: "Target directory path exists but is not a directory", + }) + } else { + // Check if directory is writable + testFile := filepath.Join(targetDir, ".backup-helper-test") + if f, err := os.Create(testFile); err == nil { + f.Close() + os.Remove(testFile) + results = append(results, CheckResult{ + Status: "OK", + Item: "target-dir", + Value: targetDir, + Message: "Directory exists and is writable", + }) + } else { + results = append(results, CheckResult{ + Status: "WARNING", + Item: "target-dir", + Value: targetDir, + Message: fmt.Sprintf("Directory exists but may not be writable: %v", err), + }) + } + } + } else if os.IsNotExist(err) { + // Directory doesn't exist, check if parent is writable + parentDir := filepath.Dir(targetDir) + if info, err := os.Stat(parentDir); err == nil && info.IsDir() { + testFile := filepath.Join(parentDir, ".backup-helper-test") + if f, err := os.Create(testFile); err == nil { + f.Close() + os.Remove(testFile) + results = append(results, CheckResult{ + Status: "OK", + Item: "target-dir", + Value: targetDir, + Message: "Directory does not exist but parent is writable (will be created)", + }) + } else { + results = append(results, CheckResult{ + Status: "ERROR", + Item: "target-dir", + Value: targetDir, + Message: fmt.Sprintf("Directory does not exist and parent is not writable: %v", err), + }) + } + } else { + results = append(results, CheckResult{ + Status: "ERROR", + Item: "target-dir", + Value: targetDir, + Message: fmt.Sprintf("Directory does not exist and parent directory is invalid: %v", err), + }) + } + } + } + + // Check TCP connectivity if stream-port or stream-host+stream-port is specified + if streamPort > 0 { + if streamHost != "" { + // Check remote connectivity + connectivityResults := CheckTCPConnectivity(streamHost, streamPort) + results = append(results, connectivityResults...) + } else { + // Check local port listenability + // Use default timeout for check mode (60s) + listenResults := CheckTCPPortListenability(streamPort, 60) + results = append(results, listenResults...) 
+ } + } + + return results +} + +// CheckTCPPortListenability checks if a local port can be listened on and waits for a connection with timeout +// This is used for --check mode to verify port availability +// timeoutSeconds: connection timeout in seconds (0 means use default 60s, max 3600s) +func CheckTCPPortListenability(port int, timeoutSeconds int) []CheckResult { + var results []CheckResult + + if port <= 0 { + return results + } + + // Try to listen on the port + addr := fmt.Sprintf(":%d", port) + ln, err := net.Listen("tcp", addr) + if err != nil { + results = append(results, CheckResult{ + Status: "ERROR", + Item: "TCP port listenability", + Value: fmt.Sprintf("port %d", port), + Message: fmt.Sprintf("Cannot listen on port %d: %v. Port may be in use or not accessible.", port, err), + }) + return results + } + + // Port can be listened on, now wait for a connection with timeout + // Set a timeout for accepting connections + if timeoutSeconds <= 0 { + timeoutSeconds = 60 // Default 60 seconds + } + if timeoutSeconds > 3600 { + timeoutSeconds = 3600 // Max 3600 seconds + } + timeout := time.Duration(timeoutSeconds) * time.Second + ln.(*net.TCPListener).SetDeadline(time.Now().Add(timeout)) + + i18n.Printf("Checking port %d: listening and waiting for connection (timeout: %v)...\n", port, timeout) + conn, err := ln.Accept() + ln.Close() // Close listener immediately after accepting or timeout + + if err != nil { + // Check if it's a timeout error + if netErr, ok := err.(net.Error); ok && netErr.Timeout() { + // Timeout is expected in check mode - port is available but no connection received + results = append(results, CheckResult{ + Status: "OK", + Item: "TCP port listenability", + Value: fmt.Sprintf("port %d", port), + Message: fmt.Sprintf("Port %d is available and can be listened on. No connection received within timeout (expected in check mode).", port), + }) + } else { + // Other error + results = append(results, CheckResult{ + Status: "WARNING", + Item: "TCP port listenability", + Value: fmt.Sprintf("port %d", port), + Message: fmt.Sprintf("Port %d can be listened on, but error accepting connection: %v", port, err), + }) + } + } else { + // Connection received - close it immediately + conn.Close() + results = append(results, CheckResult{ + Status: "OK", + Item: "TCP port listenability", + Value: fmt.Sprintf("port %d", port), + Message: fmt.Sprintf("Port %d is available and connection test successful.", port), + }) + } + + return results +} + +// CheckTCPConnectivity checks if a remote host:port is reachable +// This is used for --check mode to verify remote connectivity +func CheckTCPConnectivity(host string, port int) []CheckResult { + var results []CheckResult + + if host == "" || port <= 0 { + return results + } + + addr := net.JoinHostPort(host, strconv.Itoa(port)) + timeout := 10 * time.Second + + i18n.Printf("Checking connectivity to %s (timeout: %v)...\n", addr, timeout) + conn, err := net.DialTimeout("tcp", addr, timeout) + if err != nil { + results = append(results, CheckResult{ + Status: "ERROR", + Item: "TCP connectivity", + Value: addr, + Message: fmt.Sprintf("Cannot connect to %s: %v. 
Check network connectivity, firewall rules, and that the remote service is running.", addr, err), + }) + return results + } + + // Connection successful - close it immediately + conn.Close() + results = append(results, CheckResult{ + Status: "OK", + Item: "TCP connectivity", + Value: addr, + Message: fmt.Sprintf("Successfully connected to %s.", addr), + }) + + return results +} + +// CheckForPrepareMode performs checks specific to prepare mode +func CheckForPrepareMode(cfg *Config, targetDir string, db *sql.DB) []CheckResult { + var results []CheckResult + + // Check xtrabackup (required, but xbstream not needed for prepare) + xtrabackupPath, _, err := ResolveXtrabackupPath(cfg, false) + if err != nil { + results = append(results, CheckResult{ + Status: "ERROR", + Item: "xtrabackup", + Value: "not found", + Message: fmt.Sprintf("xtrabackup is required for prepare mode: %v", err), + }) + } else { + results = append(results, CheckResult{ + Status: "OK", + Item: "xtrabackup", + Value: fmt.Sprintf("found at %s", xtrabackupPath), + Message: "", + }) + } + + // Check target directory (required) + if targetDir == "" { + results = append(results, CheckResult{ + Status: "ERROR", + Item: "target-dir", + Value: "not specified", + Message: "--target-dir is required for prepare mode", + }) + } else { + if info, err := os.Stat(targetDir); err != nil { + results = append(results, CheckResult{ + Status: "ERROR", + Item: "target-dir", + Value: targetDir, + Message: fmt.Sprintf("Backup directory does not exist: %v", err), + }) + } else if !info.IsDir() { + results = append(results, CheckResult{ + Status: "ERROR", + Item: "target-dir", + Value: targetDir, + Message: "Target path exists but is not a directory", + }) + } else { + // Check if directory is readable + entries, err := os.ReadDir(targetDir) + if err != nil { + results = append(results, CheckResult{ + Status: "ERROR", + Item: "target-dir", + Value: targetDir, + Message: fmt.Sprintf("Cannot read backup directory: %v", err), + }) + } else if len(entries) == 0 { + results = append(results, CheckResult{ + Status: "WARNING", + Item: "target-dir", + Value: targetDir, + Message: "Backup directory is empty", + }) + } else { + results = append(results, CheckResult{ + Status: "OK", + Item: "target-dir", + Value: targetDir, + Message: fmt.Sprintf("Backup directory exists and contains %d entries", len(entries)), + }) + } + } + } + + // MySQL connection is optional for prepare, but if provided, validate defaults-file + if db != nil && cfg.DefaultsFile != "" { + result := ValidateDefaultsFile(cfg.DefaultsFile, db) + results = append(results, result) + } + + return results +} + +// CheckSystemResources checks system resources +func CheckSystemResources() SystemResources { + resources := SystemResources{} + + // CPU cores + resources.CPUCores = runtime.NumCPU() + + // Memory - try multiple methods + // Method 1: Try syscall.Sysinfo on Linux + if runtime.GOOS == "linux" { + // Try to read /proc/meminfo + if meminfo, err := os.ReadFile("/proc/meminfo"); err == nil { + lines := strings.Split(string(meminfo), "\n") + for _, line := range lines { + if strings.HasPrefix(line, "MemTotal:") { + fields := strings.Fields(line) + if len(fields) >= 2 { + if kb, err := strconv.ParseInt(fields[1], 10, 64); err == nil { + resources.TotalMemory = kb * 1024 // Convert KB to bytes + } + } + } + if strings.HasPrefix(line, "MemAvailable:") { + fields := strings.Fields(line) + if len(fields) >= 2 { + if kb, err := strconv.ParseInt(fields[1], 10, 64); err == nil { + 
resources.AvailableMemory = kb * 1024 // Convert KB to bytes + } + } + } + } + } + } else if runtime.GOOS == "darwin" { + // macOS: use sysctl + cmd := exec.Command("sysctl", "-n", "hw.memsize") + if out, err := cmd.Output(); err == nil { + if size, err := strconv.ParseInt(strings.TrimSpace(string(out)), 10, 64); err == nil { + resources.TotalMemory = size + resources.AvailableMemory = size // macOS doesn't easily provide available memory + } + } + } + + // Network info - basic interface listing + interfaces, err := net.Interfaces() + if err == nil { + var ifNames []string + for _, iface := range interfaces { + if iface.Flags&net.FlagUp != 0 && iface.Flags&net.FlagLoopback == 0 { + ifNames = append(ifNames, iface.Name) + } + } + if len(ifNames) > 0 { + resources.NetworkInfo = strings.Join(ifNames, ", ") + } + } + + return resources +} + +// RecommendParameters recommends parameters based on system resources +func RecommendParameters(resources SystemResources, mysqlSize int64, compressType string, currentCfg *Config) []CheckResult { + var results []CheckResult + + // Recommend parallel + currentParallel := currentCfg.Parallel + if currentParallel == 0 { + currentParallel = 4 // default + } + recommendedParallel := resources.CPUCores + if recommendedParallel < 2 { + recommendedParallel = 2 + } + if recommendedParallel > 16 { + recommendedParallel = 16 + } + // For compression, can use more threads + if compressType != "" { + recommendedParallel = recommendedParallel * 2 + if recommendedParallel > 16 { + recommendedParallel = 16 + } + } + + parallelMsg := fmt.Sprintf("current: %d, recommended: %d (based on %d CPU cores)", currentParallel, recommendedParallel, resources.CPUCores) + if currentParallel != recommendedParallel { + results = append(results, CheckResult{ + Status: "RECOMMEND", + Item: "parallel", + Value: fmt.Sprintf("%d", currentParallel), + Recommended: fmt.Sprintf("%d", recommendedParallel), + Message: parallelMsg, + }) + } else { + results = append(results, CheckResult{ + Status: "OK", + Item: "parallel", + Value: fmt.Sprintf("%d", currentParallel), + Message: parallelMsg, + }) + } + + // Recommend io-limit + currentIOLimit := currentCfg.IOLimit + if currentIOLimit == 0 { + currentIOLimit = 200 * 1024 * 1024 // 200MB/s default + } + recommendedIOLimit := int64(200 * 1024 * 1024) // Default 200MB/s + if resources.AvailableMemory > 0 { + // If we have a lot of memory, can increase IO limit + if resources.AvailableMemory > 16*1024*1024*1024 { // > 16GB + recommendedIOLimit = 300 * 1024 * 1024 // 300MB/s + } + } + + ioLimitMsg := "current: " + if currentIOLimit == -1 { + ioLimitMsg += "unlimited" + } else { + ioLimitMsg += formatBytesForCheck(currentIOLimit) + "/s" + } + ioLimitMsg += fmt.Sprintf(", recommended: %s/s (default)", formatBytesForCheck(recommendedIOLimit)) + + if currentIOLimit != recommendedIOLimit && currentIOLimit != -1 { + results = append(results, CheckResult{ + Status: "RECOMMEND", + Item: "io-limit", + Value: formatBytesForCheck(currentIOLimit) + "/s", + Recommended: formatBytesForCheck(recommendedIOLimit) + "/s", + Message: ioLimitMsg, + }) + } else { + results = append(results, CheckResult{ + Status: "OK", + Item: "io-limit", + Value: ioLimitMsg, + Message: "", + }) + } + + // Recommend use-memory + currentUseMemory := currentCfg.UseMemory + if currentUseMemory == "" { + currentUseMemory = "1G" + } + recommendedUseMemory := "1G" // Default + if resources.AvailableMemory > 0 { + // Recommend 25% of available memory, but between 1G and 8G + recommendedBytes := 
resources.AvailableMemory / 4 + if recommendedBytes < 1024*1024*1024 { + recommendedUseMemory = "1G" + } else if recommendedBytes > 8*1024*1024*1024 { + recommendedUseMemory = "8G" + } else { + recommendedUseMemory = formatBytesForCheck(recommendedBytes) + } + } + + useMemoryMsg := fmt.Sprintf("current: %s, recommended: %s", currentUseMemory, recommendedUseMemory) + if resources.AvailableMemory > 0 { + useMemoryMsg += fmt.Sprintf(" (based on %.1f GB available memory)", float64(resources.AvailableMemory)/(1024*1024*1024)) + } + + if currentUseMemory != recommendedUseMemory { + results = append(results, CheckResult{ + Status: "RECOMMEND", + Item: "use-memory", + Value: currentUseMemory, + Recommended: recommendedUseMemory, + Message: useMemoryMsg, + }) + } else { + results = append(results, CheckResult{ + Status: "OK", + Item: "use-memory", + Value: currentUseMemory, + Message: useMemoryMsg, + }) + } + + return results +} + +// PrintCheckResults prints check results in a formatted way +func PrintCheckResults(section string, results []CheckResult) { + i18n.Printf("\n=== %s ===\n", section) + for _, result := range results { + var statusColor func(string, ...interface{}) string + switch result.Status { + case "OK": + statusColor = color.GreenString + case "WARNING": + statusColor = color.YellowString + case "ERROR": + statusColor = color.RedString + case "INFO": + statusColor = color.CyanString + case "RECOMMEND": + statusColor = color.MagentaString + default: + statusColor = func(s string, args ...interface{}) string { + return fmt.Sprintf(s, args...) + } + } + + statusStr := statusColor("[%s]", result.Status) + output := fmt.Sprintf("%s %s: %s", statusStr, result.Item, result.Value) + + if result.Recommended != "" { + output += fmt.Sprintf(" (recommended: %s)", result.Recommended) + } + + if result.Message != "" { + output += fmt.Sprintf(" - %s", result.Message) + } + + i18n.Printf("%s\n", output) + } +} + +// formatBytesForCheck formats bytes to human-readable format (internal use in precheck) +func formatBytesForCheck(bytes int64) string { + const unit = 1024 + if bytes < unit { + return fmt.Sprintf("%d B", bytes) + } + div, exp := int64(unit), 0 + for n := bytes / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.1f %cB", float64(bytes)/float64(div), "KMGTPE"[exp]) +} diff --git a/utils/progress.go b/utils/progress.go index 571bdb3..5303167 100644 --- a/utils/progress.go +++ b/utils/progress.go @@ -23,6 +23,7 @@ type ProgressTracker struct { startOnce sync.Once mode string // "upload" or "download" outputToStderr bool // If true, output progress to stderr instead of stdout + isCompressed bool // If true, don't show percentage (compression changes size) } // NewProgressTracker creates a new progress tracker @@ -35,9 +36,17 @@ func NewProgressTracker(totalBytes int64) *ProgressTracker { lastBytes: 0, isComplete: false, mode: "upload", // default to upload + isCompressed: false, // default to not compressed } } +// NewProgressTrackerWithCompression creates a new progress tracker with compression flag +func NewProgressTrackerWithCompression(totalBytes int64, isCompressed bool) *ProgressTracker { + pt := NewProgressTracker(totalBytes) + pt.isCompressed = isCompressed + return pt +} + // NewDownloadProgressTracker creates a new progress tracker for download mode func NewDownloadProgressTracker(totalBytes int64) *ProgressTracker { pt := NewProgressTracker(totalBytes) @@ -75,7 +84,8 @@ func (pt *ProgressTracker) Complete() { } // Clear the progress line and add a 
newline - fmt.Fprint(outputWriter, "\r"+strings.Repeat(" ", 100)+"\r\n") + // Use more spaces to ensure complete clearing (terminal width is typically 80-120 chars) + fmt.Fprint(outputWriter, "\r"+strings.Repeat(" ", 150)+"\r\n") // Only calculate duration if we actually started (startTime is not zero) if pt.startTime.IsZero() { @@ -126,7 +136,8 @@ func (pt *ProgressTracker) displayProgress() { // Display progress var progressLine string - if pt.totalBytes > 0 { + if pt.totalBytes > 0 && !pt.isCompressed { + // Show percentage only when not compressed percentage := float64(uploaded) * 100.0 / float64(pt.totalBytes) progressLine = fmt.Sprintf("\rProgress: %s / %s (%.1f%%) - %s/s - Duration: %s", FormatBytes(uploaded), @@ -136,7 +147,7 @@ func (pt *ProgressTracker) displayProgress() { formatDuration(now.Sub(pt.startTime)), ) } else { - // Unknown total size + // Unknown total size or compressed - don't show percentage progressLine = fmt.Sprintf("\rProgress: %s - %s/s - Duration: %s", FormatBytes(uploaded), FormatBytes(int64(speed)), @@ -160,6 +171,7 @@ type ProgressReader struct { reader io.Reader tracker *ProgressTracker bufSize int + err error // Store the last error encountered } // NewProgressReader creates a new progress reader @@ -180,13 +192,23 @@ func (pr *ProgressReader) Read(p []byte) (n int, err error) { if n > 0 { pr.tracker.Update(int64(n)) } + // Store error for later inspection + if err != nil { + pr.err = err + } return n, err } +// GetError returns the last error encountered, if any +func (pr *ProgressReader) GetError() error { + return pr.err +} + // ProgressWriter wraps an io.Writer to track progress type ProgressWriter struct { writer io.Writer tracker *ProgressTracker + err error // Store the last error encountered } // NewProgressWriter creates a new progress writer @@ -203,9 +225,18 @@ func (pw *ProgressWriter) Write(p []byte) (n int, err error) { if n > 0 { pw.tracker.Update(int64(n)) } + // Store error for later inspection + if err != nil { + pw.err = err + } return n, err } +// GetError returns the last error encountered, if any +func (pw *ProgressWriter) GetError() error { + return pw.err +} + // FormatBytes formats bytes to human-readable format (exported for use in other packages) func FormatBytes(bytes int64) string { const unit = 1024 diff --git a/utils/rate_limit.go b/utils/rate_limit.go index 1c7e3fe..5b857b8 100644 --- a/utils/rate_limit.go +++ b/utils/rate_limit.go @@ -6,21 +6,26 @@ import ( "time" ) -// RateLimitedWriter wraps an io.Writer with rate limiting +// RateLimitedWriter wraps an io.Writer with rate limiting using a proper token bucket type RateLimitedWriter struct { - writer io.Writer - rateLimit int64 // bytes per second - lastWrite time.Time - bytesWritten int64 - mu sync.Mutex + writer io.Writer + rateLimit int64 // bytes per second + tokens float64 // Current tokens in bucket + capacity float64 // Bucket capacity (allow some burst for smoothness) + lastUpdate time.Time + mu sync.Mutex } // NewRateLimitedWriter creates a new rate-limited writer func NewRateLimitedWriter(writer io.Writer, rateLimit int64) *RateLimitedWriter { + // Allow burst up to 2x rate limit for smoothness + capacity := float64(rateLimit) * 2 return &RateLimitedWriter{ - writer: writer, - rateLimit: rateLimit, - lastWrite: time.Now(), + writer: writer, + rateLimit: rateLimit, + tokens: capacity, // Start with full bucket + capacity: capacity, + lastUpdate: time.Now(), } } @@ -28,6 +33,11 @@ func NewRateLimitedWriter(writer io.Writer, rateLimit int64) *RateLimitedWriter func 
(rlw *RateLimitedWriter) UpdateRateLimit(newLimit int64) { rlw.mu.Lock() rlw.rateLimit = newLimit + rlw.capacity = float64(newLimit) * 2 + // Adjust tokens proportionally if needed + if rlw.tokens > rlw.capacity { + rlw.tokens = rlw.capacity + } rlw.mu.Unlock() } @@ -49,34 +59,43 @@ func (rlw *RateLimitedWriter) Write(p []byte) (n int, err error) { return rlw.writer.Write(p) } - // Use token bucket algorithm for rate limiting totalWritten := 0 for totalWritten < len(p) { - now := time.Now() rlw.mu.Lock() - elapsed := now.Sub(rlw.lastWrite).Seconds() + now := time.Now() - // Calculate how many bytes we can write - if elapsed > 1.0 { - // Reset counter every second - rlw.bytesWritten = 0 - rlw.lastWrite = now - elapsed = 0 + // Refill tokens based on elapsed time + elapsed := now.Sub(rlw.lastUpdate).Seconds() + if elapsed > 0 { + // Add tokens at rateLimit bytes per second + rlw.tokens += float64(rateLimit) * elapsed + if rlw.tokens > rlw.capacity { + rlw.tokens = rlw.capacity + } + rlw.lastUpdate = now } - allowedBytes := int64(float64(rateLimit) * elapsed) - available := allowedBytes - rlw.bytesWritten + // Calculate how many bytes we can write + available := int64(rlw.tokens) + rlw.mu.Unlock() if available <= 0 { - // Need to wait - waitTime := time.Duration(-float64(available) / float64(rateLimit) * float64(time.Second)) - rlw.mu.Unlock() - time.Sleep(waitTime) - rlw.mu.Lock() - now = time.Now() - rlw.lastWrite = now - rlw.bytesWritten = 0 - available = rateLimit + // Need to wait for tokens + // Calculate wait time: tokens needed / rate + needed := float64(len(p) - totalWritten) + if needed > rlw.capacity { + needed = rlw.capacity + } + waitTime := time.Duration((needed - rlw.tokens) / float64(rateLimit) * float64(time.Second)) + if waitTime > 0 { + // Use smaller sleep increments for better precision + if waitTime > 100*time.Millisecond { + time.Sleep(100 * time.Millisecond) + } else { + time.Sleep(waitTime) + } + } + continue } // Write as much as we can @@ -85,8 +104,6 @@ func (rlw *RateLimitedWriter) Write(p []byte) (n int, err error) { writeSize = int(available) } - rlw.mu.Unlock() - written, writeErr := rlw.writer.Write(p[totalWritten : totalWritten+writeSize]) totalWritten += written @@ -94,8 +111,12 @@ func (rlw *RateLimitedWriter) Write(p []byte) (n int, err error) { return totalWritten, writeErr } + // Consume tokens rlw.mu.Lock() - rlw.bytesWritten += int64(written) + rlw.tokens -= float64(written) + if rlw.tokens < 0 { + rlw.tokens = 0 + } rlw.mu.Unlock() } @@ -110,21 +131,26 @@ func (rlw *RateLimitedWriter) Close() error { return nil } -// RateLimitedReader wraps an io.Reader with rate limiting +// RateLimitedReader wraps an io.Reader with rate limiting using a proper token bucket type RateLimitedReader struct { - reader io.Reader - rateLimit int64 // bytes per second - lastRead time.Time - bytesRead int64 - mu sync.Mutex + reader io.Reader + rateLimit int64 // bytes per second + tokens float64 // Current tokens in bucket + capacity float64 // Bucket capacity (allow some burst for smoothness) + lastUpdate time.Time + mu sync.Mutex } // NewRateLimitedReader creates a new rate-limited reader func NewRateLimitedReader(reader io.Reader, rateLimit int64) *RateLimitedReader { + // Allow burst up to 2x rate limit for smoothness + capacity := float64(rateLimit) * 2 return &RateLimitedReader{ - reader: reader, - rateLimit: rateLimit, - lastRead: time.Now(), + reader: reader, + rateLimit: rateLimit, + tokens: capacity, // Start with full bucket + capacity: capacity, + lastUpdate: 
time.Now(), } } @@ -139,34 +165,43 @@ func (rlr *RateLimitedReader) Read(p []byte) (n int, err error) { return rlr.reader.Read(p) } - // Use token bucket algorithm for rate limiting totalRead := 0 for totalRead < len(p) { - now := time.Now() rlr.mu.Lock() - elapsed := now.Sub(rlr.lastRead).Seconds() + now := time.Now() - // Calculate how many bytes we can read - if elapsed > 1.0 { - // Reset counter every second - rlr.bytesRead = 0 - rlr.lastRead = now - elapsed = 0 + // Refill tokens based on elapsed time + elapsed := now.Sub(rlr.lastUpdate).Seconds() + if elapsed > 0 { + // Add tokens at rateLimit bytes per second + rlr.tokens += float64(rateLimit) * elapsed + if rlr.tokens > rlr.capacity { + rlr.tokens = rlr.capacity + } + rlr.lastUpdate = now } - allowedBytes := int64(float64(rateLimit) * elapsed) - available := allowedBytes - rlr.bytesRead + // Calculate how many bytes we can read + available := int64(rlr.tokens) + rlr.mu.Unlock() if available <= 0 { - // Need to wait - waitTime := time.Duration(-float64(available) / float64(rateLimit) * float64(time.Second)) - rlr.mu.Unlock() - time.Sleep(waitTime) - rlr.mu.Lock() - now = time.Now() - rlr.lastRead = now - rlr.bytesRead = 0 - available = rateLimit + // Need to wait for tokens + // Calculate wait time: tokens needed / rate + needed := float64(len(p) - totalRead) + if needed > rlr.capacity { + needed = rlr.capacity + } + waitTime := time.Duration((needed - rlr.tokens) / float64(rateLimit) * float64(time.Second)) + if waitTime > 0 { + // Use smaller sleep increments for better precision + if waitTime > 100*time.Millisecond { + time.Sleep(100 * time.Millisecond) + } else { + time.Sleep(waitTime) + } + } + continue } // Read as much as we can @@ -175,8 +210,6 @@ func (rlr *RateLimitedReader) Read(p []byte) (n int, err error) { readSize = int(available) } - rlr.mu.Unlock() - read, readErr := rlr.reader.Read(p[totalRead : totalRead+readSize]) totalRead += read @@ -184,8 +217,12 @@ func (rlr *RateLimitedReader) Read(p []byte) (n int, err error) { return totalRead, readErr } + // Consume tokens rlr.mu.Lock() - rlr.bytesRead += int64(read) + rlr.tokens -= float64(read) + if rlr.tokens < 0 { + rlr.tokens = 0 + } rlr.mu.Unlock() // If we read less than requested, we're done diff --git a/utils/ssh_receiver.go b/utils/ssh_receiver.go new file mode 100644 index 0000000..ff26559 --- /dev/null +++ b/utils/ssh_receiver.go @@ -0,0 +1,160 @@ +package utils + +import ( + "bufio" + "fmt" + "io" + "os/exec" + "regexp" + "strconv" + "strings" + "time" +) + +// StartRemoteReceiverViaSSH starts backup-helper receiver on remote host via SSH +// If port > 0, uses that port; if port == 0, auto-finds available port +// Returns the port number where receiver is listening and the output path +func StartRemoteReceiverViaSSH( + sshHost string, + port int, // If > 0, use this port; if 0, auto-find + remoteOutput string, + estimatedSize int64, + enableHandshake bool, + handshakeKey string, +) (int, string, *exec.Cmd, func() error, error) { + + // Build remote backup-helper command + remoteCmd := []string{"backup-helper", "--download"} + + if port > 0 { + remoteCmd = append(remoteCmd, fmt.Sprintf("--stream-port=%d", port)) + } else { + remoteCmd = append(remoteCmd, "--stream-port=0") // Auto-find + } + + if remoteOutput != "" { + remoteCmd = append(remoteCmd, "--output", remoteOutput) + } + + if estimatedSize > 0 { + remoteCmd = append(remoteCmd, "--estimated-size", FormatBytes(estimatedSize)) + } + + if enableHandshake { + remoteCmd = append(remoteCmd, 
"--enable-handshake") + if handshakeKey != "" { + remoteCmd = append(remoteCmd, "--stream-key", handshakeKey) + } + } + + // Disable rate limiting on remote receiver, rate limiting is handled on sender side + remoteCmd = append(remoteCmd, "--io-limit=-1") + + // Execute SSH command (rely on system SSH config) + cmd := exec.Command("ssh", sshHost, strings.Join(remoteCmd, " ")) + + stdout, err := cmd.StdoutPipe() + if err != nil { + return 0, "", nil, nil, fmt.Errorf("failed to create stdout pipe: %v", err) + } + + stderr, err := cmd.StderrPipe() + if err != nil { + return 0, "", nil, nil, fmt.Errorf("failed to create stderr pipe: %v", err) + } + + if err := cmd.Start(); err != nil { + return 0, "", nil, nil, fmt.Errorf("failed to start SSH: %v", err) + } + + // Parse output to find port and output path (from stderr) + actualPort, outputPath, err := parseReceiverInfo(stderr, port, remoteOutput) + if err != nil { + cmd.Process.Kill() + return 0, "", nil, nil, fmt.Errorf("failed to parse receiver info: %v", err) + } + + // IMPORTANT: Start goroutines to consume stdout/stderr to prevent buffer blocking + // After parsing the port, the remote process will continue to output progress info + // to stderr. If we don't consume it, the buffer will fill up and block the remote process. + go func() { + // Discard stdout (remote backup-helper doesn't output to stdout in download mode) + io.Copy(io.Discard, stdout) + }() + + go func() { + // Discard stderr after port parsing (we don't need progress info in SSH mode) + // This prevents the stderr buffer from filling up and blocking the remote process + io.Copy(io.Discard, stderr) + }() + + cleanupFunc := func() error { + // Send SIGTERM to remote backup-helper process + killCmd := exec.Command("ssh", sshHost, "pkill -TERM backup-helper") + killCmd.Run() + return cmd.Process.Kill() + } + + return actualPort, outputPath, cmd, cleanupFunc, nil +} + +// parseReceiverInfo parses both port and output path from backup-helper receiver output +// Looks for: "[backup-helper] Listening on :" +// Note: "Saved to: " comes after transfer completes, so we can't parse it here +// We return the port and use remoteOutput if provided +func parseReceiverInfo(reader io.Reader, expectedPort int, remoteOutput string) (int, string, error) { + scanner := bufio.NewScanner(reader) + portPattern := regexp.MustCompile(`Listening on [\d.]+:(\d+)`) + + timeout := time.After(10 * time.Second) + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + + var actualPort int + + for { + select { + case <-timeout: + if actualPort == 0 { + return 0, "", fmt.Errorf("timeout waiting for receiver to start") + } + // We got the port, return it with remoteOutput (if provided) + return actualPort, remoteOutput, nil + case <-ticker.C: + if scanner.Scan() { + line := scanner.Text() + // Parse port + if matches := portPattern.FindStringSubmatch(line); matches != nil { + if port, err := strconv.Atoi(matches[1]); err == nil { + // If expectedPort was specified, validate it matches + if expectedPort > 0 && port != expectedPort { + return 0, "", fmt.Errorf("port mismatch: expected %d, got %d", expectedPort, port) + } + actualPort = port + // Got the port, return immediately + return actualPort, remoteOutput, nil + } + } + } else { + if err := scanner.Err(); err != nil { + if actualPort == 0 { + return 0, "", err + } + // We got the port, return it with remoteOutput + return actualPort, remoteOutput, nil + } + // EOF, wait a bit more + time.Sleep(200 * time.Millisecond) + } + } + } +} + +// 
parseReceiverPort parses the port from backup-helper receiver output +// Looks for: "[backup-helper] Listening on :" +// If expectedPort > 0, validates it matches +// DEPRECATED: Use parseReceiverInfo instead +func parseReceiverPort(reader io.Reader, expectedPort int) (int, error) { + port, _, err := parseReceiverInfo(reader, expectedPort, "") + return port, err +} diff --git a/utils/tcp_stream.go b/utils/tcp_stream.go index fc97c73..fabf8b6 100644 --- a/utils/tcp_stream.go +++ b/utils/tcp_stream.go @@ -41,8 +41,9 @@ func GetLocalIP() (string, error) { // StartStreamSender starts a TCP server on the given port for sending data. // It accepts connections and returns a WriteCloser for writing data to the remote client. // If port is 0, it will automatically find an available port. +// timeoutSeconds: connection timeout in seconds (0 means use default 60s, max 3600s) // Returns the actual listening port and local IP for display. -func StartStreamSender(port int, enableHandshake bool, handshakeKey string, totalSize int64) (io.WriteCloser, *ProgressTracker, func(), int, string, error) { +func StartStreamSender(port int, enableHandshake bool, handshakeKey string, totalSize int64, isCompressed bool, timeoutSeconds int, logCtx *LogContext) (io.WriteCloser, *ProgressTracker, func(), int, string, error) { var addr string var actualPort int @@ -51,6 +52,9 @@ func StartStreamSender(port int, enableHandshake bool, handshakeKey string, tota var err error actualPort, err = GetAvailablePort() if err != nil { + if logCtx != nil { + logCtx.WriteLog("TCP", "Failed to find available port: %v", err) + } return nil, nil, nil, 0, "", fmt.Errorf("failed to find available port: %v", err) } addr = fmt.Sprintf(":%d", actualPort) @@ -61,6 +65,9 @@ func StartStreamSender(port int, enableHandshake bool, handshakeKey string, tota ln, err := net.Listen("tcp", addr) if err != nil { + if logCtx != nil { + logCtx.WriteLog("TCP", "Failed to listen on port %d: %v", actualPort, err) + } return nil, nil, nil, 0, "", fmt.Errorf("failed to listen on port %d: %v", actualPort, err) } @@ -71,18 +78,45 @@ func StartStreamSender(port int, enableHandshake bool, handshakeKey string, tota } fmt.Printf("[backup-helper] Listening on %s:%d\n", localIP, actualPort) - fmt.Printf("[backup-helper] Waiting for remote connection...\n") + fmt.Printf("[backup-helper] Waiting for remote connection (timeout: %ds)...\n", timeoutSeconds) + if logCtx != nil { + logCtx.WriteLog("TCP", "Listening on %s:%d", localIP, actualPort) + logCtx.WriteLog("TCP", "Waiting for remote connection (timeout: %ds)", timeoutSeconds) + } + + // Set timeout for accepting connections + if timeoutSeconds <= 0 { + timeoutSeconds = 60 // Default 60 seconds + } + if timeoutSeconds > 3600 { + timeoutSeconds = 3600 // Max 3600 seconds + } + timeout := time.Duration(timeoutSeconds) * time.Second + ln.(*net.TCPListener).SetDeadline(time.Now().Add(timeout)) // Create progress tracker - tracker := NewProgressTracker(totalSize) + tracker := NewProgressTrackerWithCompression(totalSize, isCompressed) if !enableHandshake { conn, err := ln.Accept() if err != nil { ln.Close() + if netErr, ok := err.(net.Error); ok && netErr.Timeout() { + if logCtx != nil { + logCtx.WriteLog("TCP", "Connection timeout after %ds: %v", timeoutSeconds, err) + } + return nil, nil, nil, 0, "", fmt.Errorf("connection timeout after %ds on port %d: %v", timeoutSeconds, actualPort, err) + } + if logCtx != nil { + logCtx.WriteLog("TCP", "Failed to accept connection: %v", err) + } return nil, nil, nil, 0, "", 
fmt.Errorf("failed to accept connection on port %d: %v", actualPort, err) } fmt.Println("[backup-helper] Remote client connected, no handshake required.") + if logCtx != nil { + logCtx.WriteLog("TCP", "Remote client connected, no handshake required") + logCtx.WriteLog("TCP", "Transfer started") + } closer := func() { tracker.Complete(); conn.Close(); ln.Close() } progressWriter := NewProgressWriter(conn, tracker) return struct { @@ -93,10 +127,16 @@ func StartStreamSender(port int, enableHandshake bool, handshakeKey string, tota for { conn, err := ln.Accept() if err != nil { + if logCtx != nil { + logCtx.WriteLog("TCP", "Failed to accept connection: %v", err) + } ln.Close() return nil, nil, nil, 0, "", fmt.Errorf("failed to accept connection on port %d: %v", actualPort, err) } fmt.Println("[backup-helper] Remote client connected, waiting for handshake...") + if logCtx != nil { + logCtx.WriteLog("TCP", "Remote client connected, waiting for handshake") + } goAway := false // set timeout to prevent from hanging @@ -110,7 +150,9 @@ func StartStreamSender(port int, enableHandshake bool, handshakeKey string, tota line = strings.TrimSpace(line) if line == handshakeKey { conn.SetReadDeadline(time.Time{}) // cancel timeout - fmt.Println("[backup-helper] Handshake OK, start streaming backup...") + if logCtx != nil { + logCtx.WriteLog("TCP", "Handshake OK, transfer started") + } closer := func() { tracker.Complete(); conn.Close(); ln.Close() } progressWriter := NewProgressWriter(conn, tracker) return struct { @@ -129,11 +171,219 @@ func StartStreamSender(port int, enableHandshake bool, handshakeKey string, tota } } +// StartStreamClient connects to a remote TCP server and returns a WriteCloser for pushing data. +// Similar to `nc host port`, this function actively connects to the remote server. +// Returns the remote address for display. 
+func StartStreamClient(host string, port int, enableHandshake bool, handshakeKey string, totalSize int64, isCompressed bool, logCtx *LogContext) (io.WriteCloser, *ProgressTracker, func(), string, error) { + if host == "" { + return nil, nil, nil, "", fmt.Errorf("stream-host cannot be empty") + } + if port <= 0 { + return nil, nil, nil, "", fmt.Errorf("stream-port must be specified when using --stream-host") + } + + addr := fmt.Sprintf("%s:%d", host, port) + fmt.Printf("[backup-helper] Connecting to %s...\n", addr) + if logCtx != nil { + logCtx.WriteLog("TCP", "Connecting to %s", addr) + } + + // Create progress tracker + tracker := NewProgressTrackerWithCompression(totalSize, isCompressed) + + conn, err := net.DialTimeout("tcp", addr, 30*time.Second) + if err != nil { + if logCtx != nil { + logCtx.WriteLog("TCP", "Failed to connect to %s: %v", addr, err) + } + return nil, nil, nil, "", fmt.Errorf("failed to connect to %s: %v", addr, err) + } + + fmt.Printf("[backup-helper] Connected to %s\n", addr) + if logCtx != nil { + logCtx.WriteLog("TCP", "Connected to %s", addr) + } + + if !enableHandshake { + if logCtx != nil { + logCtx.WriteLog("TCP", "Transfer started (no handshake)") + } + closer := func() { + tracker.Complete() + conn.Close() + if logCtx != nil { + logCtx.WriteLog("TCP", "Transfer completed") + } + } + progressWriter := NewProgressWriter(conn, tracker) + return struct { + io.Writer + io.Closer + }{Writer: progressWriter, Closer: conn}, tracker, closer, addr, nil + } + + // Send handshake + handshakeMsg := handshakeKey + "\n" + _, err = conn.Write([]byte(handshakeMsg)) + if err != nil { + conn.Close() + if logCtx != nil { + logCtx.WriteLog("TCP", "Failed to send handshake: %v", err) + } + return nil, nil, nil, "", fmt.Errorf("failed to send handshake to %s: %v", addr, err) + } + + // Wait for handshake response + conn.SetReadDeadline(time.Now().Add(10 * time.Second)) + reader := bufio.NewReader(conn) + response, err := reader.ReadString('\n') + if err != nil { + conn.Close() + if logCtx != nil { + logCtx.WriteLog("TCP", "Failed to receive handshake response: %v", err) + } + return nil, nil, nil, "", fmt.Errorf("failed to receive handshake response from %s: %v", addr, err) + } + + response = strings.TrimSpace(response) + if response != "OK" && !strings.Contains(response, "OK") { + conn.Close() + if logCtx != nil { + logCtx.WriteLog("TCP", "Handshake failed: received '%s'", response) + } + return nil, nil, nil, "", fmt.Errorf("handshake failed: received '%s' from %s", response, addr) + } + + conn.SetReadDeadline(time.Time{}) // cancel timeout + fmt.Printf("[backup-helper] Handshake OK, start streaming backup to %s...\n", addr) + if logCtx != nil { + logCtx.WriteLog("TCP", "Handshake OK, transfer started") + } + + closer := func() { + tracker.Complete() + conn.Close() + if logCtx != nil { + logCtx.WriteLog("TCP", "Transfer completed") + } + } + progressWriter := NewProgressWriter(conn, tracker) + return struct { + io.Writer + io.Closer + }{Writer: progressWriter, Closer: conn}, tracker, closer, addr, nil +} + +// StartStreamClientReader starts a TCP client connection to the given host:port for reading data. +// It actively connects to the remote server and returns a ReadCloser for reading data. +// If handshake is enabled, it will wait for the server's handshake message and respond with "OK". +// Returns the remote address for display. 
+func StartStreamClientReader(host string, port int, enableHandshake bool, handshakeKey string, totalSize int64, isCompressed bool, logCtx *LogContext) (io.ReadCloser, *ProgressTracker, func(), string, error) { + if host == "" { + return nil, nil, nil, "", fmt.Errorf("stream-host cannot be empty") + } + if port <= 0 { + return nil, nil, nil, "", fmt.Errorf("stream-port must be specified when using --stream-host") + } + + addr := fmt.Sprintf("%s:%d", host, port) + fmt.Printf("[backup-helper] Connecting to %s...\n", addr) + if logCtx != nil { + logCtx.WriteLog("TCP", "Connecting to %s", addr) + } + + // Create progress tracker + tracker := NewProgressTrackerWithCompression(totalSize, isCompressed) + tracker.mode = "download" + + conn, err := net.DialTimeout("tcp", addr, 30*time.Second) + if err != nil { + if logCtx != nil { + logCtx.WriteLog("TCP", "Failed to connect to %s: %v", addr, err) + } + return nil, nil, nil, "", fmt.Errorf("failed to connect to %s: %v", addr, err) + } + + fmt.Printf("[backup-helper] Connected to %s\n", addr) + if logCtx != nil { + logCtx.WriteLog("TCP", "Connected to %s", addr) + } + + if !enableHandshake { + if logCtx != nil { + logCtx.WriteLog("TCP", "Transfer started (no handshake)") + } + closer := func() { + tracker.Complete() + conn.Close() + if logCtx != nil { + logCtx.WriteLog("TCP", "Transfer completed") + } + } + progressReader := NewProgressReader(conn, tracker, 64*1024) + return struct { + io.Reader + io.Closer + }{Reader: progressReader, Closer: conn}, tracker, closer, addr, nil + } + + // Wait for handshake from server + conn.SetReadDeadline(time.Now().Add(10 * time.Second)) + reader := bufio.NewReader(conn) + handshakeMsg, err := reader.ReadString('\n') + if err != nil { + conn.Close() + if logCtx != nil { + logCtx.WriteLog("TCP", "Failed to receive handshake: %v", err) + } + return nil, nil, nil, "", fmt.Errorf("failed to receive handshake from %s: %v", addr, err) + } + + handshakeMsg = strings.TrimSpace(handshakeMsg) + if handshakeMsg != handshakeKey { + conn.Close() + if logCtx != nil { + logCtx.WriteLog("TCP", "Handshake failed: received '%s', expected '%s'", handshakeMsg, handshakeKey) + } + return nil, nil, nil, "", fmt.Errorf("handshake failed: received '%s' from %s, expected '%s'", handshakeMsg, addr, handshakeKey) + } + + // Send OK response + _, err = conn.Write([]byte("OK\n")) + if err != nil { + conn.Close() + if logCtx != nil { + logCtx.WriteLog("TCP", "Failed to send handshake response: %v", err) + } + return nil, nil, nil, "", fmt.Errorf("failed to send handshake response to %s: %v", addr, err) + } + + conn.SetReadDeadline(time.Time{}) // cancel timeout + fmt.Printf("[backup-helper] Handshake OK, start receiving backup from %s...\n", addr) + if logCtx != nil { + logCtx.WriteLog("TCP", "Handshake OK, transfer started") + } + + closer := func() { + tracker.Complete() + conn.Close() + if logCtx != nil { + logCtx.WriteLog("TCP", "Transfer completed") + } + } + progressReader := NewProgressReader(conn, tracker, 64*1024) + return struct { + io.Reader + io.Closer + }{Reader: progressReader, Closer: conn}, tracker, closer, addr, nil +} + // StartStreamReceiver starts a TCP server on the given port for receiving data. // It accepts connections and returns a ReadCloser for reading data from the remote client. // If port is 0, it will automatically find an available port. +// timeoutSeconds: connection timeout in seconds (0 means use default 60s, max 3600s) // Returns the actual listening port and local IP for display. 
-func StartStreamReceiver(port int, enableHandshake bool, handshakeKey string, totalSize int64) (io.ReadCloser, *ProgressTracker, func(), int, string, error) { +func StartStreamReceiver(port int, enableHandshake bool, handshakeKey string, totalSize int64, isCompressed bool, timeoutSeconds int, logCtx *LogContext) (io.ReadCloser, *ProgressTracker, func(), int, string, error) { var addr string var actualPort int @@ -142,6 +392,9 @@ func StartStreamReceiver(port int, enableHandshake bool, handshakeKey string, to var err error actualPort, err = GetAvailablePort() if err != nil { + if logCtx != nil { + logCtx.WriteLog("TCP", "Failed to find available port: %v", err) + } return nil, nil, nil, 0, "", fmt.Errorf("failed to find available port: %v", err) } addr = fmt.Sprintf(":%d", actualPort) @@ -152,6 +405,9 @@ func StartStreamReceiver(port int, enableHandshake bool, handshakeKey string, to ln, err := net.Listen("tcp", addr) if err != nil { + if logCtx != nil { + logCtx.WriteLog("TCP", "Failed to listen on port %d: %v", actualPort, err) + } return nil, nil, nil, 0, "", fmt.Errorf("failed to listen on port %d: %v", actualPort, err) } @@ -162,20 +418,60 @@ func StartStreamReceiver(port int, enableHandshake bool, handshakeKey string, to } fmt.Fprintf(os.Stderr, "[backup-helper] Listening on %s:%d\n", localIP, actualPort) - fmt.Fprintf(os.Stderr, "[backup-helper] Waiting for remote connection...\n") + fmt.Fprintf(os.Stderr, "[backup-helper] Waiting for remote connection (timeout: %ds)...\n", timeoutSeconds) + if logCtx != nil { + logCtx.WriteLog("TCP", "Listening on %s:%d", localIP, actualPort) + logCtx.WriteLog("TCP", "Waiting for remote connection (timeout: %ds)", timeoutSeconds) + } + + // Set timeout for accepting connections + if timeoutSeconds <= 0 { + timeoutSeconds = 60 // Default 60 seconds + } + if timeoutSeconds > 3600 { + timeoutSeconds = 3600 // Max 3600 seconds + } + timeout := time.Duration(timeoutSeconds) * time.Second + ln.(*net.TCPListener).SetDeadline(time.Now().Add(timeout)) // Create progress tracker for download mode tracker := NewDownloadProgressTracker(totalSize) + tracker.isCompressed = isCompressed if !enableHandshake { conn, err := ln.Accept() if err != nil { ln.Close() + if netErr, ok := err.(net.Error); ok && netErr.Timeout() { + if logCtx != nil { + logCtx.WriteLog("TCP", "Connection timeout after %ds: %v", timeoutSeconds, err) + } + return nil, nil, nil, 0, "", fmt.Errorf("connection timeout after %ds on port %d: %v", timeoutSeconds, actualPort, err) + } + if logCtx != nil { + logCtx.WriteLog("TCP", "Failed to accept connection: %v", err) + } return nil, nil, nil, 0, "", fmt.Errorf("failed to accept connection on port %d: %v", actualPort, err) } fmt.Fprintf(os.Stderr, "[backup-helper] Remote client connected, no handshake required.\n") - closer := func() { tracker.Complete(); conn.Close(); ln.Close() } + if logCtx != nil { + logCtx.WriteLog("TCP", "Remote client connected, no handshake required") + logCtx.WriteLog("TCP", "Transfer started") + } progressReader := NewProgressReader(conn, tracker, 64*1024) + closer := func() { + tracker.Complete() + conn.Close() + ln.Close() + if logCtx != nil { + // Check if transfer completed normally (check if reader encountered EOF or error) + if err := progressReader.GetError(); err != nil && err != io.EOF { + logCtx.WriteLog("TCP", "Transfer interrupted: connection closed unexpectedly: %v", err) + } else { + logCtx.WriteLog("TCP", "Transfer completed") + } + } + } return struct { io.Reader io.Closer @@ -184,10 +480,16 @@ func 
StartStreamReceiver(port int, enableHandshake bool, handshakeKey string, to for { conn, err := ln.Accept() if err != nil { + if logCtx != nil { + logCtx.WriteLog("TCP", "Failed to accept connection: %v", err) + } ln.Close() return nil, nil, nil, 0, "", fmt.Errorf("failed to accept connection on port %d: %v", actualPort, err) } fmt.Fprintf(os.Stderr, "[backup-helper] Remote client connected, waiting for handshake...\n") + if logCtx != nil { + logCtx.WriteLog("TCP", "Remote client connected, waiting for handshake") + } goAway := false // set timeout to prevent from hanging @@ -201,9 +503,26 @@ func StartStreamReceiver(port int, enableHandshake bool, handshakeKey string, to line = strings.TrimSpace(line) if line == handshakeKey { conn.SetReadDeadline(time.Time{}) // cancel timeout + // Send OK response for handshake + conn.Write([]byte("OK\n")) fmt.Fprintf(os.Stderr, "[backup-helper] Handshake OK, start receiving backup...\n") - closer := func() { tracker.Complete(); conn.Close(); ln.Close() } + if logCtx != nil { + logCtx.WriteLog("TCP", "Handshake OK, transfer started") + } progressReader := NewProgressReader(conn, tracker, 64*1024) + closer := func() { + tracker.Complete() + conn.Close() + ln.Close() + if logCtx != nil { + // Check if transfer completed normally (check if reader encountered EOF or error) + if err := progressReader.GetError(); err != nil && err != io.EOF { + logCtx.WriteLog("TCP", "Transfer interrupted: connection closed unexpectedly: %v", err) + } else { + logCtx.WriteLog("TCP", "Transfer completed") + } + } + } return struct { io.Reader io.Closer