From 0faf8d353ada01b8547a350c451605c223bae0f5 Mon Sep 17 00:00:00 2001 From: Manu Date: Sat, 28 Mar 2026 12:22:56 +0100 Subject: [PATCH 01/21] Bump version to v1.3.0 and add changelog entry Prepare for 1.3.0 release: updated version strings in package.json and wiki/package.json, updated OpenAPI docs in api-docs/openapi.yaml and public/openapi.yaml to v1.3.0, and added a v1.3.0 section to wiki/changelog.md with Docker image tags and platform info. --- api-docs/openapi.yaml | 2 +- package.json | 2 +- public/openapi.yaml | 2 +- wiki/changelog.md | 10 ++++++++++ wiki/package.json | 2 +- 5 files changed, 14 insertions(+), 4 deletions(-) diff --git a/api-docs/openapi.yaml b/api-docs/openapi.yaml index 3ba93a7..50b2fba 100644 --- a/api-docs/openapi.yaml +++ b/api-docs/openapi.yaml @@ -1,7 +1,7 @@ openapi: 3.1.0 info: title: DBackup API - version: 1.2.1 + version: 1.3.0 description: | REST API for DBackup โ€” a self-hosted database backup automation platform with encryption, compression, and smart retention. diff --git a/package.json b/package.json index 7c7de6e..94ec4c5 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "dbackup", - "version": "1.2.1", + "version": "1.3.0", "private": true, "scripts": { "dev": "next dev", diff --git a/public/openapi.yaml b/public/openapi.yaml index 3ba93a7..50b2fba 100644 --- a/public/openapi.yaml +++ b/public/openapi.yaml @@ -1,7 +1,7 @@ openapi: 3.1.0 info: title: DBackup API - version: 1.2.1 + version: 1.3.0 description: | REST API for DBackup โ€” a self-hosted database backup automation platform with encryption, compression, and smart retention. diff --git a/wiki/changelog.md b/wiki/changelog.md index 924d87e..37d5973 100644 --- a/wiki/changelog.md +++ b/wiki/changelog.md @@ -2,6 +2,16 @@ All notable changes to DBackup are documented here. 
+## v1.3.0 +*Release: In Progress* + +### ๐Ÿณ Docker + +- **Image**: `skyfay/dbackup:v1.3.0` +- **Also tagged as**: `latest`, `v1` +- **Platforms**: linux/amd64, linux/arm64 + + ## v1.2.1 - Execution Cancellation, MSSQL Progress & Dashboard Polish *Released: March 26, 2026* diff --git a/wiki/package.json b/wiki/package.json index 6cce039..8d3b4c4 100644 --- a/wiki/package.json +++ b/wiki/package.json @@ -1,6 +1,6 @@ { "name": "dbackup-wiki", - "version": "1.2.1", + "version": "1.3.0", "private": true, "scripts": { "dev": "vitepress dev", From 0dacc317a74d852c956c8cddefc1ca2792359b08 Mon Sep 17 00:00:00 2001 From: Manu Date: Sat, 28 Mar 2026 13:30:10 +0100 Subject: [PATCH 02/21] Support SSH mode for database adapters Introduce SSH-based execution for database adapters and the test SSH route. Adds a new SSH client library (src/lib/ssh/*) and integrates SSH handling into MySQL, PostgreSQL, MongoDB (and related dump/restore flows) to detect remote binaries, run commands, and stream dumps/restores over SSH. Update the test-ssh API route to use SshClient and split MSSQL SFTP-specific checks from generic SSH exec tests. UI updated to expose an SSH tab and reusable SshConfigSection for adapters that support connectionMode. These changes enable remote CLI-based backup/restore operations and remote database inspection when direct TCP connections are not available. 
--- src/app/api/adapters/test-ssh/route.ts | 129 ++++++++---- src/components/adapter/form-sections.tsx | 29 ++- .../adapters/database/mongodb/connection.ts | 90 ++++++++- src/lib/adapters/database/mongodb/dump.ts | 72 +++++++ src/lib/adapters/database/mongodb/restore.ts | 77 +++++++ src/lib/adapters/database/mysql/connection.ts | 188 +++++++++++++++--- src/lib/adapters/database/mysql/dump.ts | 78 ++++++++ src/lib/adapters/database/mysql/restore.ts | 82 ++++++++ .../adapters/database/postgres/connection.ts | 137 ++++++++++--- src/lib/adapters/database/postgres/dump.ts | 85 ++++++++ src/lib/adapters/database/postgres/restore.ts | 149 ++++++++++++++ src/lib/adapters/database/redis/connection.ts | 68 +++++++ src/lib/adapters/database/redis/dump.ts | 110 ++++++++++ .../adapters/database/sqlite/connection.ts | 14 +- src/lib/adapters/database/sqlite/dump.ts | 15 +- src/lib/adapters/database/sqlite/restore.ts | 15 +- .../adapters/database/sqlite/ssh-client.ts | 75 +------ src/lib/adapters/definitions.ts | 17 ++ src/lib/ssh/index.ts | 14 ++ src/lib/ssh/ssh-client.ts | 94 +++++++++ src/lib/ssh/utils.ts | 168 ++++++++++++++++ wiki/changelog.md | 11 + 22 files changed, 1519 insertions(+), 198 deletions(-) create mode 100644 src/lib/ssh/index.ts create mode 100644 src/lib/ssh/ssh-client.ts create mode 100644 src/lib/ssh/utils.ts diff --git a/src/app/api/adapters/test-ssh/route.ts b/src/app/api/adapters/test-ssh/route.ts index 12124f5..11456d0 100644 --- a/src/app/api/adapters/test-ssh/route.ts +++ b/src/app/api/adapters/test-ssh/route.ts @@ -4,6 +4,8 @@ import { getAuthContext, checkPermissionWithContext } from "@/lib/access-control import { PERMISSIONS } from "@/lib/permissions"; import { MssqlSshTransfer } from "@/lib/adapters/database/mssql/ssh-transfer"; import { MSSQLConfig } from "@/lib/adapters/definitions"; +import { SshClient } from "@/lib/ssh"; +import { extractSshConfig } from "@/lib/ssh"; import { logger } from "@/lib/logger"; import { wrapError } from 
"@/lib/errors"; @@ -20,7 +22,7 @@ export async function POST(req: NextRequest) { try { const body = await req.json(); - const { config } = body as { config: MSSQLConfig }; + const { config } = body as { config: Record }; if (!config) { return NextResponse.json( @@ -36,58 +38,111 @@ export async function POST(req: NextRequest) { ); } - const sshTransfer = new MssqlSshTransfer(); const sshHost = config.sshHost || config.host; const sshPort = config.sshPort || 22; - try { - await sshTransfer.connect(config); - - // Test read/write on backup path if configured - const backupPath = config.backupPath || "/var/opt/mssql/backup"; - const pathResult = await sshTransfer.testBackupPath(backupPath); + // MSSQL uses SFTP-based SSH test (backup path check) + if (config.fileTransferMode === "ssh") { + return testMssqlSsh(config as MSSQLConfig, sshHost, sshPort); + } - sshTransfer.end(); + // Generic SSH connection test for all other adapters + return testGenericSsh(config, sshHost, sshPort); + } catch (error: unknown) { + log.error("SSH test route error", {}, wrapError(error)); + const message = + error instanceof Error ? error.message : "Unknown error"; + return NextResponse.json( + { success: false, message }, + { status: 500 } + ); + } +} - if (!pathResult.readable) { - return NextResponse.json({ - success: false, - message: `SSH connection to ${sshHost}:${sshPort} successful, but backup path is not accessible: ${backupPath}`, - }); - } +/** + * Generic SSH test: connect and run a simple echo command. 
+ */ +async function testGenericSsh(config: Record, sshHost: string, sshPort: number) { + const sshConfig = extractSshConfig({ ...config, connectionMode: "ssh" }); + if (!sshConfig) { + return NextResponse.json( + { success: false, message: "Invalid SSH configuration" }, + { status: 400 } + ); + } - if (!pathResult.writable) { - return NextResponse.json({ - success: false, - message: `SSH connection to ${sshHost}:${sshPort} successful, but backup path is read-only: ${backupPath}`, - }); - } + const ssh = new SshClient(); + try { + await ssh.connect(sshConfig); + const result = await ssh.exec("echo connected"); + if (result.code === 0) { return NextResponse.json({ success: true, - message: `SSH connection to ${sshHost}:${sshPort} successful โ€” backup path ${backupPath} is readable and writable`, + message: `SSH connection to ${sshHost}:${sshPort} successful`, }); - } catch (connectError: unknown) { - sshTransfer.end(); - const message = - connectError instanceof Error - ? connectError.message - : "SSH connection failed"; + } + + return NextResponse.json({ + success: false, + message: `SSH connected but test command failed: ${result.stderr}`, + }); + } catch (connectError: unknown) { + const message = + connectError instanceof Error + ? connectError.message + : "SSH connection failed"; + log.warn("SSH test failed", { sshHost }, wrapError(connectError)); + return NextResponse.json({ success: false, message }); + } finally { + ssh.end(); + } +} + +/** + * MSSQL-specific SSH test: SFTP connect + backup path check. 
+ */ +async function testMssqlSsh(config: MSSQLConfig, sshHost: string, sshPort: number) { + const sshTransfer = new MssqlSshTransfer(); + + try { + await sshTransfer.connect(config); + + const backupPath = config.backupPath || "/var/opt/mssql/backup"; + const pathResult = await sshTransfer.testBackupPath(backupPath); - log.warn("SSH test failed", { sshHost }, wrapError(connectError)); + sshTransfer.end(); + if (!pathResult.readable) { return NextResponse.json({ success: false, - message, + message: `SSH connection to ${sshHost}:${sshPort} successful, but backup path is not accessible: ${backupPath}`, }); } - } catch (error: unknown) { - log.error("SSH test route error", {}, wrapError(error)); + + if (!pathResult.writable) { + return NextResponse.json({ + success: false, + message: `SSH connection to ${sshHost}:${sshPort} successful, but backup path is read-only: ${backupPath}`, + }); + } + + return NextResponse.json({ + success: true, + message: `SSH connection to ${sshHost}:${sshPort} successful โ€” backup path ${backupPath} is readable and writable`, + }); + } catch (connectError: unknown) { + sshTransfer.end(); const message = - error instanceof Error ? error.message : "Unknown error"; - return NextResponse.json( - { success: false, message }, - { status: 500 } - ); + connectError instanceof Error + ? 
connectError.message + : "SSH connection failed"; + + log.warn("SSH test failed", { sshHost }, wrapError(connectError)); + + return NextResponse.json({ + success: false, + message, + }); } } diff --git a/src/components/adapter/form-sections.tsx b/src/components/adapter/form-sections.tsx index 6104ad9..230aa1e 100644 --- a/src/components/adapter/form-sections.tsx +++ b/src/components/adapter/form-sections.tsx @@ -163,13 +163,24 @@ export function DatabaseFormContent({ const isMSSQL = adapter.id === "mssql"; const fileTransferMode = watch("config.fileTransferMode"); const sshAuthType = watch("config.sshAuthType"); + const connectionMode = watch("config.connectionMode"); + + // Adapters that support SSH connection mode (have connectionMode field in schema) + const hasSSH = adapter.configSchema.shape && "connectionMode" in adapter.configSchema.shape && !isMSSQL; + const showTabs = isMSSQL || hasSSH; + const tabCount = 2 + (isMSSQL ? 1 : 0) + (hasSSH ? 1 : 0); return ( - + Connection Configuration {isMSSQL && File Transfer} + {hasSSH && SSH} @@ -235,14 +246,24 @@ export function DatabaseFormContent({ )} )} + + {hasSSH && ( + + + {connectionMode === "ssh" && ( + + )} + + )} ); } /** - * SSH configuration section for MSSQL file transfer with integrated test button. + * SSH configuration section with integrated test button. + * Used by MSSQL (file transfer) and other database adapters (SSH exec). */ -function SshConfigSection({ adapter, sshAuthType }: { adapter: AdapterDefinition; sshAuthType: string }) { +function SshConfigSection({ adapter, sshAuthType, description }: { adapter: AdapterDefinition; sshAuthType: string; description?: string }) { const { getValues } = useFormContext(); const [isTestingSsh, setIsTestingSsh] = useState(false); @@ -275,7 +296,7 @@ function SshConfigSection({ adapter, sshAuthType }: { adapter: AdapterDefinition return (

- SSH credentials to download/upload .bak files from the SQL Server host. + {description || "SSH credentials to download/upload .bak files from the SQL Server host."}

diff --git a/src/lib/adapters/database/mongodb/connection.ts b/src/lib/adapters/database/mongodb/connection.ts index e28a47d..75abf99 100644 --- a/src/lib/adapters/database/mongodb/connection.ts +++ b/src/lib/adapters/database/mongodb/connection.ts @@ -1,5 +1,13 @@ import { MongoClient } from "mongodb"; import { MongoDBConfig } from "@/lib/adapters/definitions"; +import { + SshClient, + isSSHMode, + extractSshConfig, + buildMongoArgs, + remoteEnv, + remoteBinaryCheck, +} from "@/lib/ssh"; /** * Build MongoDB connection URI from config @@ -19,6 +27,30 @@ function buildConnectionUri(config: MongoDBConfig): string { } export async function test(config: MongoDBConfig): Promise<{ success: boolean; message: string; version?: string }> { + if (isSSHMode(config)) { + const sshConfig = extractSshConfig(config)!; + const ssh = new SshClient(); + try { + await ssh.connect(sshConfig); + const mongoshBin = await remoteBinaryCheck(ssh, "mongosh", "mongo"); + const args = buildMongoArgs(config); + + const cmd = `${mongoshBin} ${args.join(" ")} --quiet --eval "db.adminCommand({buildInfo:1}).version"`; + const result = await ssh.exec(cmd); + + if (result.code === 0) { + const version = result.stdout.trim(); + return { success: true, message: "Connection successful (via SSH)", version }; + } + return { success: false, message: `SSH MongoDB test failed: ${result.stderr}` }; + } catch (error: unknown) { + const msg = error instanceof Error ? 
error.message : String(error); + return { success: false, message: `SSH connection failed: ${msg}` }; + } finally { + ssh.end(); + } + } + let client: MongoClient | null = null; try { @@ -50,6 +82,28 @@ export async function test(config: MongoDBConfig): Promise<{ success: boolean; m } export async function getDatabases(config: MongoDBConfig): Promise { + const sysDbs = ["admin", "config", "local"]; + + if (isSSHMode(config)) { + const sshConfig = extractSshConfig(config)!; + const ssh = new SshClient(); + try { + await ssh.connect(sshConfig); + const mongoshBin = await remoteBinaryCheck(ssh, "mongosh", "mongo"); + const args = buildMongoArgs(config); + + const cmd = `${mongoshBin} ${args.join(" ")} --quiet --eval "db.adminCommand({listDatabases:1}).databases.map(d=>d.name).join('\\n')"`; + const result = await ssh.exec(cmd); + + if (result.code !== 0) { + throw new Error(`Failed to list databases: ${result.stderr}`); + } + return result.stdout.split('\n').map(s => s.trim()).filter(s => s && !sysDbs.includes(s)); + } finally { + ssh.end(); + } + } + let client: MongoClient | null = null; try { @@ -64,7 +118,6 @@ export async function getDatabases(config: MongoDBConfig): Promise { const adminDb = client.db("admin"); const result = await adminDb.command({ listDatabases: 1 }); - const sysDbs = ["admin", "config", "local"]; return result.databases .map((db: { name: string }) => db.name) .filter((name: string) => !sysDbs.includes(name)); @@ -81,6 +134,41 @@ export async function getDatabases(config: MongoDBConfig): Promise { import { DatabaseInfo } from "@/lib/core/interfaces"; export async function getDatabasesWithStats(config: MongoDBConfig): Promise { + const sysDbs = ["admin", "config", "local"]; + + if (isSSHMode(config)) { + const sshConfig = extractSshConfig(config)!; + const ssh = new SshClient(); + try { + await ssh.connect(sshConfig); + const mongoshBin = await remoteBinaryCheck(ssh, "mongosh", "mongo"); + const args = buildMongoArgs(config); + + // Use a JS 
script to output tab-separated name\tsize\tcollectionCount + const script = `db.adminCommand({listDatabases:1}).databases.filter(d=>!['admin','config','local'].includes(d.name)).forEach(d=>{let c=0;try{c=db.getSiblingDB(d.name).getCollectionNames().length}catch(e){}print(d.name+'\\t'+(d.sizeOnDisk||0)+'\\t'+c)})`; + const cmd = `${mongoshBin} ${args.join(" ")} --quiet --eval "${script}"`; + const result = await ssh.exec(cmd); + + if (result.code !== 0) { + throw new Error(`Failed to get database stats: ${result.stderr}`); + } + return result.stdout + .split('\n') + .map(line => line.trim()) + .filter(line => line) + .map(line => { + const [name, sizeStr, tableStr] = line.split('\t'); + return { + name, + sizeInBytes: parseInt(sizeStr, 10) || 0, + tableCount: parseInt(tableStr, 10) || 0, + }; + }); + } finally { + ssh.end(); + } + } + let client: MongoClient | null = null; try { diff --git a/src/lib/adapters/database/mongodb/dump.ts b/src/lib/adapters/database/mongodb/dump.ts index 4da4e74..f6de575 100644 --- a/src/lib/adapters/database/mongodb/dump.ts +++ b/src/lib/adapters/database/mongodb/dump.ts @@ -13,6 +13,14 @@ import { } from "../common/tar-utils"; import { TarFileEntry, TarManifest } from "../common/types"; import { MongoDBConfig } from "@/lib/adapters/definitions"; +import { + SshClient, + isSSHMode, + extractSshConfig, + buildMongoArgs, + remoteBinaryCheck, + shellEscape, +} from "@/lib/ssh"; /** * Extended MongoDB config for dump operations with runtime fields @@ -30,6 +38,10 @@ async function dumpSingleDatabase( config: MongoDBDumpConfig, log: (msg: string, level?: LogLevel, type?: LogType, details?: string) => void ): Promise { + if (isSSHMode(config)) { + return dumpSingleDatabaseSSH(dbName, outputPath, config, log); + } + const args: string[] = []; if (config.uri) { @@ -78,6 +90,66 @@ async function dumpSingleDatabase( await waitForProcess(dumpProcess, 'mongodump'); } +/** + * SSH variant: run mongodump on the remote server with --archive to stdout, 
stream back. + */ +async function dumpSingleDatabaseSSH( + dbName: string, + outputPath: string, + config: MongoDBDumpConfig, + log: (msg: string, level?: LogLevel, type?: LogType, details?: string) => void +): Promise { + const sshConfig = extractSshConfig(config)!; + const ssh = new SshClient(); + await ssh.connect(sshConfig); + + try { + const mongodumpBin = await remoteBinaryCheck(ssh, "mongodump"); + const args = buildMongoArgs(config); + + args.push("--db", shellEscape(dbName)); + args.push("--archive"); // stdout mode + args.push("--gzip"); + + if (config.options) { + const parts = config.options.match(/[^\s"']+|"([^"]*)"|'([^']*)'/g) || []; + for (const part of parts) { + if (part.startsWith('"') && part.endsWith('"')) args.push(part.slice(1, -1)); + else if (part.startsWith("'") && part.endsWith("'")) args.push(part.slice(1, -1)); + else args.push(part); + } + } + + const cmd = `${mongodumpBin} ${args.join(" ")}`; + log(`Dumping database (SSH): ${dbName}`, 'info', 'command', `mongodump ${args.join(' ').replace(config.password || '___NONE___', '******')}`); + + const writeStream = createWriteStream(outputPath); + + await new Promise((resolve, reject) => { + ssh.execStream(cmd, (err, stream) => { + if (err) return reject(err); + + stream.pipe(writeStream); + + stream.stderr.on('data', (data: any) => { + const msg = data.toString().trim(); + if (msg) log(msg, 'info'); + }); + + stream.on('exit', (code: number) => { + if (code === 0) resolve(); + else reject(new Error(`Remote mongodump exited with code ${code}`)); + }); + + stream.on('error', (err: Error) => reject(err)); + writeStream.on('error', (err: Error) => reject(err)); + }); + }); + } finally { + ssh.end(); + } +} + export async function dump( config: MongoDBDumpConfig, destinationPath: string, diff --git a/src/lib/adapters/database/mongodb/restore.ts b/src/lib/adapters/database/mongodb/restore.ts index 63e5fd3..4366ac0 100644 --- a/src/lib/adapters/database/mongodb/restore.ts +++ 
b/src/lib/adapters/database/mongodb/restore.ts @@ -14,6 +14,14 @@ import { shouldRestoreDatabase, getTargetDatabaseName, } from "../common/tar-utils"; +import { + SshClient, + isSSHMode, + extractSshConfig, + buildMongoArgs, + remoteBinaryCheck, + shellEscape, +} from "@/lib/ssh"; /** Extended config with optional privileged auth for restore operations */ type MongoDBRestoreConfig = MongoDBConfig & { @@ -48,6 +56,11 @@ function buildConnectionUri(config: MongoDBConfig): string { } export async function prepareRestore(config: MongoDBRestoreConfig, databases: string[]): Promise { + if (isSSHMode(config)) { + // In SSH mode, we trust mongorestore to create databases. Skip the permission check. + return; + } + // Determine credentials (privileged or standard) const usageConfig: MongoDBConfig = { ...config }; if (config.privilegedAuth) { @@ -99,6 +112,10 @@ async function restoreSingleDatabase( log: (msg: string, level?: LogLevel, type?: LogType, details?: string) => void, fromStdin: boolean = false ): Promise { + if (isSSHMode(config)) { + return restoreSingleDatabaseSSH(sourcePath, targetDb, sourceDb, config, log); + } + const args: string[] = []; if (config.uri) { @@ -161,6 +178,66 @@ async function restoreSingleDatabase( await waitForProcess(restoreProcess, 'mongorestore'); } +/** + * SSH variant: pipe local archive to remote mongorestore via SSH stdin. 
+ */ +async function restoreSingleDatabaseSSH( + sourcePath: string, + targetDb: string | undefined, + sourceDb: string | undefined, + config: MongoDBRestoreConfig, + log: (msg: string, level?: LogLevel, type?: LogType, details?: string) => void +): Promise { + const sshConfig = extractSshConfig(config)!; + const ssh = new SshClient(); + await ssh.connect(sshConfig); + + try { + const mongorestoreBin = await remoteBinaryCheck(ssh, "mongorestore"); + const args = buildMongoArgs(config); + + args.push("--archive"); // read from stdin + args.push("--gzip"); + args.push("--drop"); + + if (sourceDb && targetDb && sourceDb !== targetDb) { + args.push("--nsFrom", shellEscape(`${sourceDb}.*`)); + args.push("--nsTo", shellEscape(`${targetDb}.*`)); + log(`Remapping database: ${sourceDb} -> ${targetDb}`, 'info'); + } else if (targetDb) { + args.push("--nsInclude", shellEscape(`${targetDb}.*`)); + } + + const cmd = `${mongorestoreBin} ${args.join(" ")}`; + log(`Restoring database (SSH)`, 'info', 'command', `mongorestore ${args.join(' ').replace(config.password || '___NONE___', '******')}`); + + const fileStream = createReadStream(sourcePath); + + await new Promise((resolve, reject) => { + ssh.execStream(cmd, (err, stream) => { + if (err) return reject(err); + + stream.stderr.on('data', (data: any) => { + const msg = data.toString().trim(); + if (msg) log(`[mongorestore] ${msg}`, 'info'); + }); + + stream.on('exit', (code: number) => { + if (code === 0) resolve(); + else reject(new Error(`Remote mongorestore exited with code ${code}`)); + }); + + stream.on('error', (err: Error) => reject(err)); + fileStream.on('error', (err: Error) => reject(err)); + + fileStream.pipe(stream); + }); + }); + } finally { + ssh.end(); + } +} + export async function restore( config: MongoDBRestoreConfig, sourcePath: string, diff --git a/src/lib/adapters/database/mysql/connection.ts b/src/lib/adapters/database/mysql/connection.ts index 6ed0491..7563387 100644 --- 
a/src/lib/adapters/database/mysql/connection.ts +++ b/src/lib/adapters/database/mysql/connection.ts @@ -2,10 +2,56 @@ import { execFile } from "child_process"; import util from "util"; import { getMysqlCommand, getMysqladminCommand } from "./tools"; import { MySQLConfig } from "@/lib/adapters/definitions"; +import { + SshClient, + isSSHMode, + extractSshConfig, + buildMysqlArgs, + remoteEnv, + remoteBinaryCheck, + shellEscape, +} from "@/lib/ssh"; export const execFileAsync = util.promisify(execFile); export async function ensureDatabase(config: MySQLConfig, dbName: string, user: string, pass: string | undefined, privileged: boolean, logs: string[]) { + if (isSSHMode(config)) { + const sshConfig = extractSshConfig(config)!; + const ssh = new SshClient(); + try { + await ssh.connect(sshConfig); + const mysqlBin = await remoteBinaryCheck(ssh, "mariadb", "mysql"); + const args = buildMysqlArgs(config, user); + const env: Record = {}; + if (pass) env.MYSQL_PWD = pass; + + const createCmd = remoteEnv(env, `${mysqlBin} ${args.join(" ")} -e ${shellEscape(`CREATE DATABASE IF NOT EXISTS \`${dbName}\``)}`); + const result = await ssh.exec(createCmd); + if (result.code !== 0) { + logs.push(`Warning ensures DB '${dbName}': ${result.stderr}`); + return; + } + logs.push(`Database '${dbName}' ensured.`); + + if (privileged) { + const grantQuery = `GRANT ALL PRIVILEGES ON \`${dbName}\`.* TO '${config.user}'@'%'; GRANT ALL PRIVILEGES ON \`${dbName}\`.* TO '${config.user}'@'localhost'; FLUSH PRIVILEGES;`; + const grantCmd = remoteEnv(env, `${mysqlBin} ${args.join(" ")} -e ${shellEscape(grantQuery)}`); + const grantResult = await ssh.exec(grantCmd); + if (grantResult.code === 0) { + logs.push(`Permissions granted for '${dbName}'.`); + } else { + logs.push(`Warning grants for '${dbName}': ${grantResult.stderr}`); + } + } + } catch (e: unknown) { + const message = e instanceof Error ? 
e.message : String(e); + logs.push(`Warning ensures DB '${dbName}': ${message}`); + } finally { + ssh.end(); + } + return; + } + const args = ['-h', config.host, '-P', String(config.port), '-u', user, '--protocol=tcp']; if (config.disableSsl) { args.push('--skip-ssl'); @@ -28,6 +74,47 @@ export async function ensureDatabase(config: MySQLConfig, dbName: string, user: } export async function test(config: MySQLConfig): Promise<{ success: boolean; message: string; version?: string }> { + if (isSSHMode(config)) { + const sshConfig = extractSshConfig(config)!; + const ssh = new SshClient(); + try { + await ssh.connect(sshConfig); + + // Detect available binaries on remote + const mysqlBin = await remoteBinaryCheck(ssh, "mariadb", "mysql"); + const mysqladminBin = await remoteBinaryCheck(ssh, "mariadb-admin", "mysqladmin"); + + const args = buildMysqlArgs(config); + const env: Record = {}; + if (config.password) env.MYSQL_PWD = config.password; + + // 1. Ping test + const pingCmd = remoteEnv(env, `${mysqladminBin} ping ${args.join(" ")} --connect-timeout=10`); + const pingResult = await ssh.exec(pingCmd); + if (pingResult.code !== 0) { + return { success: false, message: `SSH ping failed: ${pingResult.stderr}` }; + } + + // 2. Version check + const versionCmd = remoteEnv(env, `${mysqlBin} ${args.join(" ")} -N -s -e 'SELECT VERSION()'`); + const versionResult = await ssh.exec(versionCmd); + if (versionResult.code !== 0) { + return { success: true, message: "Connection successful (via SSH, version unknown)" }; + } + + const rawVersion = versionResult.stdout.trim(); + const versionMatch = rawVersion.match(/^([\d.]+)/); + const version = versionMatch ? versionMatch[1] : rawVersion; + + return { success: true, message: "Connection successful (via SSH)", version }; + } catch (error: unknown) { + const msg = error instanceof Error ? 
error.message : String(error); + return { success: false, message: `SSH connection failed: ${msg}` }; + } finally { + ssh.end(); + } + } + try { // 1. Basic Ping Test // Increased timeout to 10s to handle heavy load during integration tests @@ -67,6 +154,29 @@ export async function test(config: MySQLConfig): Promise<{ success: boolean; mes } export async function getDatabases(config: MySQLConfig): Promise { + const sysDbs = ['information_schema', 'mysql', 'performance_schema', 'sys']; + + if (isSSHMode(config)) { + const sshConfig = extractSshConfig(config)!; + const ssh = new SshClient(); + try { + await ssh.connect(sshConfig); + const mysqlBin = await remoteBinaryCheck(ssh, "mariadb", "mysql"); + const args = buildMysqlArgs(config); + const env: Record = {}; + if (config.password) env.MYSQL_PWD = config.password; + + const cmd = remoteEnv(env, `${mysqlBin} ${args.join(" ")} -e 'SHOW DATABASES' --skip-column-names`); + const result = await ssh.exec(cmd); + if (result.code !== 0) { + throw new Error(`Failed to list databases: ${result.stderr}`); + } + return result.stdout.split('\n').map(s => s.trim()).filter(s => s && !sysDbs.includes(s)); + } finally { + ssh.end(); + } + } + const args = ['-h', config.host, '-P', String(config.port), '-u', config.user, '--protocol=tcp']; if (config.disableSsl) { args.push('--skip-ssl'); @@ -81,40 +191,24 @@ export async function getDatabases(config: MySQLConfig): Promise { args.push('-e', 'SHOW DATABASES', '--skip-column-names'); const { stdout } = await execFileAsync(getMysqlCommand(), args, { env }); - const sysDbs = ['information_schema', 'mysql', 'performance_schema', 'sys']; return stdout.split('\n').map(s => s.trim()).filter(s => s && !sysDbs.includes(s)); } import { DatabaseInfo } from "@/lib/core/interfaces"; -export async function getDatabasesWithStats(config: MySQLConfig): Promise { - const args = ['-h', config.host, '-P', String(config.port), '-u', config.user, '--protocol=tcp']; - if (config.disableSsl) { - 
args.push('--skip-ssl'); - } - - const env = { ...process.env }; - if (config.password) { - env.MYSQL_PWD = config.password; - } - - // Query database sizes and table counts from information_schema - const query = ` - SELECT - s.schema_name AS db_name, - COALESCE(SUM(t.data_length + t.index_length), 0) AS size_bytes, - COUNT(t.table_name) AS table_count - FROM information_schema.schemata s - LEFT JOIN information_schema.tables t ON s.schema_name = t.table_schema - WHERE s.schema_name NOT IN ('information_schema', 'mysql', 'performance_schema', 'sys') - GROUP BY s.schema_name - ORDER BY s.schema_name; - `.trim(); - - args.push('-e', query, '--skip-column-names', '--batch'); - - const { stdout } = await execFileAsync(getMysqlCommand(), args, { env }); +const statsQuery = ` + SELECT + s.schema_name AS db_name, + COALESCE(SUM(t.data_length + t.index_length), 0) AS size_bytes, + COUNT(t.table_name) AS table_count + FROM information_schema.schemata s + LEFT JOIN information_schema.tables t ON s.schema_name = t.table_schema + WHERE s.schema_name NOT IN ('information_schema', 'mysql', 'performance_schema', 'sys') + GROUP BY s.schema_name + ORDER BY s.schema_name; +`.trim(); +function parseStatsOutput(stdout: string): DatabaseInfo[] { return stdout .split('\n') .map(line => line.trim()) @@ -128,3 +222,41 @@ export async function getDatabasesWithStats(config: MySQLConfig): Promise { + if (isSSHMode(config)) { + const sshConfig = extractSshConfig(config)!; + const ssh = new SshClient(); + try { + await ssh.connect(sshConfig); + const mysqlBin = await remoteBinaryCheck(ssh, "mariadb", "mysql"); + const args = buildMysqlArgs(config); + const env: Record = {}; + if (config.password) env.MYSQL_PWD = config.password; + + const cmd = remoteEnv(env, `${mysqlBin} ${args.join(" ")} -e ${shellEscape(statsQuery)} --skip-column-names --batch`); + const result = await ssh.exec(cmd); + if (result.code !== 0) { + throw new Error(`Failed to get database stats: ${result.stderr}`); + } + 
return parseStatsOutput(result.stdout); + } finally { + ssh.end(); + } + } + + const args = ['-h', config.host, '-P', String(config.port), '-u', config.user, '--protocol=tcp']; + if (config.disableSsl) { + args.push('--skip-ssl'); + } + + const env = { ...process.env }; + if (config.password) { + env.MYSQL_PWD = config.password; + } + + args.push('-e', statsQuery, '--skip-column-names', '--batch'); + + const { stdout } = await execFileAsync(getMysqlCommand(), args, { env }); + return parseStatsOutput(stdout); +} diff --git a/src/lib/adapters/database/mysql/dump.ts b/src/lib/adapters/database/mysql/dump.ts index 401028a..cb82032 100644 --- a/src/lib/adapters/database/mysql/dump.ts +++ b/src/lib/adapters/database/mysql/dump.ts @@ -13,6 +13,15 @@ import { cleanupTempDir, } from "../common/tar-utils"; import { TarFileEntry } from "../common/types"; +import { + SshClient, + isSSHMode, + extractSshConfig, + buildMysqlArgs, + remoteEnv, + remoteBinaryCheck, + shellEscape, +} from "@/lib/ssh"; /** Extended config with runtime fields */ type MySQLDumpConfig = (MySQLConfig | MariaDBConfig) & { @@ -29,6 +38,10 @@ async function dumpSingleDatabase( destinationPath: string, onLog: (msg: string, level?: LogLevel, type?: LogType, details?: string) => void ): Promise<{ success: boolean; size: number }> { + if (isSSHMode(config)) { + return dumpSingleDatabaseSSH(config, dbName, destinationPath, onLog); + } + const dialect = getDialect(config.type === 'mariadb' ? 'mariadb' : 'mysql', config.detectedVersion); const args = dialect.getDumpArgs(config, [dbName]); @@ -69,6 +82,71 @@ async function dumpSingleDatabase( return { success: true, size: stats.size }; } +/** + * SSH variant: run mysqldump on the remote server and stream output to a local file. 
+ */ +async function dumpSingleDatabaseSSH( + config: MySQLDumpConfig, + dbName: string, + destinationPath: string, + onLog: (msg: string, level?: LogLevel, type?: LogType, details?: string) => void +): Promise<{ success: boolean; size: number }> { + const sshConfig = extractSshConfig(config)!; + const ssh = new SshClient(); + await ssh.connect(sshConfig); + + try { + const dumpBin = await remoteBinaryCheck(ssh, "mariadb-dump", "mysqldump"); + const args = buildMysqlArgs(config); + + // Add dump-specific options + if ((config as any).options) { + args.push(...(config as any).options.split(' ').filter((s: string) => s.trim().length > 0)); + } + args.push("--databases", shellEscape(dbName)); + + const env: Record = {}; + if (config.password) env.MYSQL_PWD = config.password; + + const cmd = remoteEnv(env, `${dumpBin} ${args.join(" ")}`); + const safeCmd = cmd.replace(config.password || '___NONE___', '******'); + onLog(`Dumping database (SSH): ${dbName}`, 'info', 'command', safeCmd); + + const writeStream = createWriteStream(destinationPath); + + await new Promise((resolve, reject) => { + ssh.execStream(cmd, (err, stream) => { + if (err) return reject(err); + + stream.pipe(writeStream); + + stream.stderr.on('data', (data: any) => { + const msg = data.toString().trim(); + if (msg.includes("Using a password") || msg.includes("Deprecated program name")) return; + onLog(msg); + }); + + stream.on('exit', (code: number) => { + if (code === 0) resolve(); + else reject(new Error(`Remote mysqldump exited with code ${code}`)); + }); + + stream.on('error', (err: Error) => reject(err)); + writeStream.on('error', (err: Error) => reject(err)); + }); + }); + + const stats = await fs.stat(destinationPath); + if (stats.size === 0) { + throw new Error(`Dump file for ${dbName} is empty. 
Check logs/permissions.`); + } + + return { success: true, size: stats.size }; + } finally { + ssh.end(); + } +} + export async function dump(config: MySQLDumpConfig, destinationPath: string, onLog?: (msg: string, level?: LogLevel, type?: LogType, details?: string) => void, _onProgress?: (percentage: number) => void): Promise { const startedAt = new Date(); const logs: string[] = []; diff --git a/src/lib/adapters/database/mysql/restore.ts b/src/lib/adapters/database/mysql/restore.ts index 5993cbc..e5d1b5b 100644 --- a/src/lib/adapters/database/mysql/restore.ts +++ b/src/lib/adapters/database/mysql/restore.ts @@ -17,6 +17,15 @@ import { shouldRestoreDatabase, getTargetDatabaseName, } from "../common/tar-utils"; +import { + SshClient, + isSSHMode, + extractSshConfig, + buildMysqlArgs, + remoteEnv, + remoteBinaryCheck, + shellEscape, +} from "@/lib/ssh"; /** Extended config with runtime fields for restore operations */ type MySQLRestoreConfig = (MySQLConfig | MariaDBConfig) & { @@ -47,6 +56,10 @@ async function restoreSingleFile( onLog: (msg: string, level?: LogLevel, type?: LogType, details?: string) => void, onProgress?: (percentage: number) => void ): Promise { + if (isSSHMode(config)) { + return restoreSingleFileSSH(config, sourcePath, targetDb, onLog, onProgress); + } + const stats = await fs.stat(sourcePath); const totalSize = stats.size; let processedSize = 0; @@ -86,6 +99,75 @@ async function restoreSingleFile( }); } +/** + * SSH variant: pipe local SQL file to remote mysql client via SSH. 
+ */
+async function restoreSingleFileSSH(
+  config: MySQLRestoreConfig,
+  sourcePath: string,
+  targetDb: string,
+  onLog: (msg: string, level?: LogLevel, type?: LogType, details?: string) => void,
+  onProgress?: (percentage: number) => void
+): Promise<void> {
+  const stats = await fs.stat(sourcePath);
+  const totalSize = stats.size;
+  let processedSize = 0;
+  let lastProgress = 0;
+
+  const sshConfig = extractSshConfig(config)!;
+  const ssh = new SshClient();
+  await ssh.connect(sshConfig);
+
+  try {
+    const mysqlBin = await remoteBinaryCheck(ssh, "mariadb", "mysql");
+    const args = buildMysqlArgs(config);
+    args.push(shellEscape(targetDb));
+
+    const env: Record<string, string> = {};
+    if (config.password) env.MYSQL_PWD = config.password;
+
+    const cmd = remoteEnv(env, `${mysqlBin} ${args.join(" ")}`);
+    onLog(`Restoring to database (SSH): ${targetDb}`, 'info', 'command', `${mysqlBin} ${args.join(" ")}`);
+
+    const fileStream = createReadStream(sourcePath, { highWaterMark: 64 * 1024 });
+
+    fileStream.on('data', (chunk) => {
+      if (onProgress && totalSize > 0) {
+        processedSize += chunk.length;
+        const p = Math.round((processedSize / totalSize) * 100);
+        if (p > lastProgress) {
+          lastProgress = p;
+          onProgress(p);
+        }
+      }
+    });
+
+    await new Promise<void>((resolve, reject) => {
+      ssh.execStream(cmd, (err, stream) => {
+        if (err) return reject(err);
+
+        stream.stderr.on('data', (data: any) => {
+          const msg = data.toString().trim();
+          if (msg.includes("Using a password") || msg.includes("Deprecated program name")) return;
+          onLog(`MySQL: ${msg}`);
+        });
+
+        stream.on('exit', (code: number) => {
+          if (code === 0) resolve();
+          else reject(new Error(`Remote mysql exited with code ${code}`));
+        });
+
+        stream.on('error', (err: Error) => reject(err));
+        fileStream.on('error', (err: Error) => reject(err));
+
+        fileStream.pipe(stream);
+      });
+    });
+  } finally {
+    ssh.end();
+  }
+}
+
 export async function restore(config: MySQLRestoreConfig, sourcePath: string, onLog?: (msg: string, level?: LogLevel,
type?: LogType, details?: string) => void, onProgress?: (percentage: number) => void): Promise { const startedAt = new Date(); const logs: string[] = []; diff --git a/src/lib/adapters/database/postgres/connection.ts b/src/lib/adapters/database/postgres/connection.ts index 5f838fd..31886a8 100644 --- a/src/lib/adapters/database/postgres/connection.ts +++ b/src/lib/adapters/database/postgres/connection.ts @@ -1,10 +1,51 @@ import { execFile } from "child_process"; import util from "util"; import { PostgresConfig } from "@/lib/adapters/definitions"; +import { + SshClient, + isSSHMode, + extractSshConfig, + buildPsqlArgs, + remoteEnv, + remoteBinaryCheck, + shellEscape, +} from "@/lib/ssh"; export const execFileAsync = util.promisify(execFile); export async function test(config: PostgresConfig): Promise<{ success: boolean; message: string; version?: string }> { + if (isSSHMode(config)) { + const sshConfig = extractSshConfig(config)!; + const ssh = new SshClient(); + try { + await ssh.connect(sshConfig); + await remoteBinaryCheck(ssh, "psql"); + const args = buildPsqlArgs(config); + const env: Record = {}; + if (config.password) env.PGPASSWORD = config.password; + + const dbsToTry = ['postgres', 'template1']; + if (typeof config.database === 'string' && config.database) dbsToTry.push(config.database); + + for (const db of dbsToTry) { + const cmd = remoteEnv(env, `psql ${args.join(" ")} -d ${shellEscape(db)} -t -c 'SELECT version()'`); + const result = await ssh.exec(cmd); + if (result.code === 0) { + const rawVersion = result.stdout.trim(); + const versionMatch = rawVersion.match(/PostgreSQL\s+([\d.]+)/); + const version = versionMatch ? versionMatch[1] : rawVersion; + return { success: true, message: "Connection successful (via SSH)", version }; + } + } + return { success: false, message: "SSH connection to PostgreSQL failed" }; + } catch (error: unknown) { + const msg = error instanceof Error ? 
error.message : String(error);
+      return { success: false, message: `SSH connection failed: ${msg}` };
+    } finally {
+      ssh.end();
+    }
+  }
+
   const dbsToTry = ['postgres', 'template1'];
   if (typeof config.database === 'string' && config.database) dbsToTry.push(config.database);
 
@@ -33,6 +74,31 @@ export async function test(config: PostgresConfig): Promise<{ success: boolean;
 }
 
 export async function getDatabases(config: PostgresConfig): Promise<string[]> {
+  if (isSSHMode(config)) {
+    const sshConfig = extractSshConfig(config)!;
+    const ssh = new SshClient();
+    try {
+      await ssh.connect(sshConfig);
+      const args = buildPsqlArgs(config);
+      const env: Record<string, string> = {};
+      if (config.password) env.PGPASSWORD = config.password;
+
+      const dbsToTry = ['postgres', 'template1'];
+      if (typeof config.database === 'string' && config.database) dbsToTry.push(config.database);
+
+      for (const db of dbsToTry) {
+        const cmd = remoteEnv(env, `psql ${args.join(" ")} -d ${shellEscape(db)} -t -A -c 'SELECT datname FROM pg_database WHERE datistemplate = false;'`);
+        const result = await ssh.exec(cmd);
+        if (result.code === 0) {
+          return result.stdout.split('\n').map(s => s.trim()).filter(s => s);
+        }
+      }
+      throw new Error("Failed to list databases via SSH");
+    } finally {
+      ssh.end();
+    }
+  }
+
   const dbsToTry = ['postgres', 'template1'];
   if (typeof config.database === 'string' && config.database) dbsToTry.push(config.database);
 
@@ -54,41 +120,62 @@ export async function getDatabases(config: PostgresConfig): Promise<string[]> {
 import { DatabaseInfo } from "@/lib/core/interfaces";
 
+const pgStatsQuery = `
+  SELECT d.datname, pg_database_size(d.datname) AS size_bytes, (SELECT count(*) FROM information_schema.tables WHERE table_catalog = d.datname AND table_schema NOT IN ('pg_catalog', 'information_schema')) AS table_count FROM pg_database d WHERE d.datistemplate = false ORDER BY d.datname;
+`.trim();
+
+function parseStatsOutput(stdout: string): DatabaseInfo[] {
+  return stdout
+    .split('\n')
+    .map(line => line.trim())
+ .filter(line => line) + .map(line => { + const parts = line.split('\t'); + return { + name: parts[0], + sizeInBytes: parseInt(parts[1], 10) || 0, + tableCount: parseInt(parts[2], 10) || 0, + }; + }); +} + export async function getDatabasesWithStats(config: PostgresConfig): Promise { + if (isSSHMode(config)) { + const sshConfig = extractSshConfig(config)!; + const ssh = new SshClient(); + try { + await ssh.connect(sshConfig); + const args = buildPsqlArgs(config); + const env: Record = {}; + if (config.password) env.PGPASSWORD = config.password; + + const dbsToTry = ['postgres', 'template1']; + if (typeof config.database === 'string' && config.database) dbsToTry.push(config.database); + + for (const db of dbsToTry) { + const cmd = remoteEnv(env, `psql ${args.join(" ")} -d ${shellEscape(db)} -t -A -F '\t' -c ${shellEscape(pgStatsQuery)}`); + const result = await ssh.exec(cmd); + if (result.code === 0) { + return parseStatsOutput(result.stdout); + } + } + throw new Error("Failed to get database stats via SSH"); + } finally { + ssh.end(); + } + } + const dbsToTry = ['postgres', 'template1']; if (typeof config.database === 'string' && config.database) dbsToTry.push(config.database); const env = { ...process.env, PGPASSWORD: config.password }; let lastError: unknown; - // Query database sizes and approximate table counts - const query = ` - SELECT - d.datname, - pg_database_size(d.datname) AS size_bytes, - (SELECT count(*) FROM information_schema.tables WHERE table_catalog = d.datname AND table_schema NOT IN ('pg_catalog', 'information_schema')) AS table_count - FROM pg_database d - WHERE d.datistemplate = false - ORDER BY d.datname; - `.trim().replace(/\n/g, ' '); - for (const db of dbsToTry) { try { - const args = ['-h', config.host, '-p', String(config.port), '-U', config.user, '-d', db, '-t', '-A', '-F', '\t', '-c', query]; + const args = ['-h', config.host, '-p', String(config.port), '-U', config.user, '-d', db, '-t', '-A', '-F', '\t', '-c', pgStatsQuery]; const { 
stdout } = await execFileAsync('psql', args, { env }); - - return stdout - .split('\n') - .map(line => line.trim()) - .filter(line => line) - .map(line => { - const parts = line.split('\t'); - return { - name: parts[0], - sizeInBytes: parseInt(parts[1], 10) || 0, - tableCount: parseInt(parts[2], 10) || 0, - }; - }); + return parseStatsOutput(stdout); } catch (error: unknown) { lastError = error; } diff --git a/src/lib/adapters/database/postgres/dump.ts b/src/lib/adapters/database/postgres/dump.ts index 629f2b4..a4d5b7c 100644 --- a/src/lib/adapters/database/postgres/dump.ts +++ b/src/lib/adapters/database/postgres/dump.ts @@ -13,6 +13,15 @@ import { } from "../common/tar-utils"; import { TarFileEntry, TarManifest } from "../common/types"; import { PostgresConfig } from "@/lib/adapters/definitions"; +import { + SshClient, + isSSHMode, + extractSshConfig, + buildPsqlArgs, + remoteEnv, + remoteBinaryCheck, + shellEscape, +} from "@/lib/ssh"; /** * Extended PostgreSQL config for dump operations with runtime fields @@ -31,6 +40,10 @@ async function dumpSingleDatabase( env: NodeJS.ProcessEnv, log: (msg: string, level?: LogLevel, type?: LogType, details?: string) => void ): Promise { + if (isSSHMode(config)) { + return dumpSingleDatabaseSSH(dbName, outputPath, config, log); + } + const pgDumpBinary = await getPostgresBinary('pg_dump', config.detectedVersion); const args = [ @@ -80,6 +93,78 @@ async function dumpSingleDatabase( }); } +/** + * SSH variant: run pg_dump on the remote server and stream custom-format output to a local file. 
+ */ +async function dumpSingleDatabaseSSH( + dbName: string, + outputPath: string, + config: PostgresDumpConfig, + log: (msg: string, level?: LogLevel, type?: LogType, details?: string) => void +): Promise { + const sshConfig = extractSshConfig(config)!; + const ssh = new SshClient(); + await ssh.connect(sshConfig); + + try { + const pgDumpBin = await remoteBinaryCheck(ssh, "pg_dump"); + const args = buildPsqlArgs(config); + + const dumpArgs = [ + ...args, + "-F", "c", + "-Z", "6", + "-d", shellEscape(dbName), + ]; + + if (config.options) { + const parts = config.options.match(/[^\s"']+|"([^"]*)"|'([^']*)'/g) || []; + for (const part of parts) { + if (part.startsWith('"') && part.endsWith('"')) { + dumpArgs.push(part.slice(1, -1)); + } else if (part.startsWith("'") && part.endsWith("'")) { + dumpArgs.push(part.slice(1, -1)); + } else { + dumpArgs.push(part); + } + } + } + + const env: Record = {}; + if (config.password) env.PGPASSWORD = config.password; + + const cmd = remoteEnv(env, `${pgDumpBin} ${dumpArgs.join(" ")}`); + log(`Dumping database (SSH): ${dbName}`, 'info', 'command', `pg_dump ${dumpArgs.join(' ')}`); + + const writeStream = createWriteStream(outputPath); + + await new Promise((resolve, reject) => { + ssh.execStream(cmd, (err, stream) => { + if (err) return reject(err); + + stream.pipe(writeStream); + + stream.stderr.on('data', (data: any) => { + const msg = data.toString().trim(); + if (msg && !msg.includes('NOTICE:')) { + log(msg, 'info'); + } + }); + + stream.on('exit', (code: number) => { + if (code === 0) resolve(); + else reject(new Error(`Remote pg_dump for ${dbName} exited with code ${code}`)); + }); + + stream.on('error', (err: Error) => reject(err)); + writeStream.on('error', (err: Error) => reject(err)); + }); + }); + } finally { + ssh.end(); + } +} + export async function dump( config: PostgresDumpConfig, destinationPath: string, diff --git a/src/lib/adapters/database/postgres/restore.ts b/src/lib/adapters/database/postgres/restore.ts 
index 89f1ed8..8f7ed19 100644 --- a/src/lib/adapters/database/postgres/restore.ts +++ b/src/lib/adapters/database/postgres/restore.ts @@ -15,6 +15,17 @@ import { getTargetDatabaseName, } from "../common/tar-utils"; import { PostgresConfig } from "@/lib/adapters/definitions"; +import { + SshClient, + isSSHMode, + extractSshConfig, + buildPsqlArgs, + remoteEnv, + remoteBinaryCheck, + shellEscape, +} from "@/lib/ssh"; +import { randomUUID } from "crypto"; +import { createReadStream } from "fs"; /** * Extended PostgreSQL config for restore operations with runtime fields @@ -33,6 +44,10 @@ type PostgresRestoreConfig = PostgresConfig & { }; export async function prepareRestore(config: PostgresRestoreConfig, databases: string[]): Promise { + if (isSSHMode(config)) { + return prepareRestoreSSH(config, databases); + } + const usePrivileged = !!config.privilegedAuth; const user = usePrivileged ? config.privilegedAuth!.user : config.user; const pass = usePrivileged ? config.privilegedAuth!.password : config.password; @@ -71,6 +86,47 @@ export async function prepareRestore(config: PostgresRestoreConfig, databases: s } } +/** + * SSH variant: create databases on the remote server via psql. + */ +async function prepareRestoreSSH(config: PostgresRestoreConfig, databases: string[]): Promise { + const sshConfig = extractSshConfig(config)!; + const ssh = new SshClient(); + await ssh.connect(sshConfig); + + try { + const usePrivileged = !!config.privilegedAuth; + const user = usePrivileged ? config.privilegedAuth!.user : config.user; + const pass = usePrivileged ? 
config.privilegedAuth!.password : config.password; + + const args = buildPsqlArgs(config, user); + const env: Record = {}; + if (pass) env.PGPASSWORD = pass; + + for (const dbName of databases) { + const safeLiteral = dbName.replace(/'/g, "''"); + const checkCmd = remoteEnv(env, `psql ${args.join(" ")} -d postgres -t -A -c ${shellEscape(`SELECT 1 FROM pg_database WHERE datname = '${safeLiteral}'`)}`); + const checkResult = await ssh.exec(checkCmd); + if (checkResult.stdout.trim() === '1') continue; + + const safeDbName = `"${dbName.replace(/"/g, '""')}"`; + const createCmd = remoteEnv(env, `psql ${args.join(" ")} -d postgres -c ${shellEscape(`CREATE DATABASE ${safeDbName}`)}`); + const createResult = await ssh.exec(createCmd); + + if (createResult.code !== 0) { + const msg = createResult.stderr; + if (msg.includes("permission denied")) { + throw new Error(`Access denied for user '${user}' to create database '${dbName}'. User permissions?`); + } + if (msg.includes("already exists")) continue; + throw new Error(`Failed to create database '${dbName}': ${msg}`); + } + } + } finally { + ssh.end(); + } +} + /** * Detect if a backup file is in PostgreSQL custom format */ @@ -96,6 +152,10 @@ async function restoreSingleDatabase( env: NodeJS.ProcessEnv, log: (msg: string, level?: LogLevel, type?: LogType, details?: string) => void ): Promise { + if (isSSHMode(config)) { + return restoreSingleDatabaseSSH(sourcePath, targetDb, config, log); + } + const pgRestoreBinary = await getPostgresBinary('pg_restore', config.detectedVersion); const args = [ @@ -165,6 +225,95 @@ async function restoreSingleDatabase( }); } +/** + * SSH variant: upload dump to remote temp file, run pg_restore there, then cleanup. + * pg_restore with custom format needs seekable input, so we can't just pipe stdin. 
+ */ +async function restoreSingleDatabaseSSH( + sourcePath: string, + targetDb: string, + config: PostgresRestoreConfig, + log: (msg: string, level?: LogLevel, type?: LogType, details?: string) => void +): Promise { + const sshConfig = extractSshConfig(config)!; + const ssh = new SshClient(); + await ssh.connect(sshConfig); + + const remoteTempFile = `/tmp/dbackup_restore_${randomUUID()}.dump`; + + try { + const pgRestoreBin = await remoteBinaryCheck(ssh, "pg_restore"); + const args = buildPsqlArgs(config); + + const env: Record = {}; + const priv = config.privilegedAuth; + const pass = (priv && priv.password) ? priv.password : config.password; + if (pass) env.PGPASSWORD = pass; + + // 1. Upload dump file to remote temp location + log(`Uploading dump to remote: ${remoteTempFile}`, 'info'); + const fileStream = createReadStream(sourcePath); + + await new Promise((resolve, reject) => { + ssh.execStream(`cat > ${shellEscape(remoteTempFile)}`, (err, stream) => { + if (err) return reject(err); + + stream.on('exit', (code: number) => { + if (code === 0) resolve(); + else reject(new Error(`Failed to upload dump file (code ${code})`)); + }); + + stream.on('error', (err: Error) => reject(err)); + fileStream.on('error', (err: Error) => reject(err)); + + fileStream.pipe(stream); + }); + }); + + // 2. Run pg_restore on the remote + const restoreArgs = [ + ...args, + "-d", shellEscape(targetDb), + "-w", + "--clean", + "--if-exists", + "--no-owner", + "--no-acl", + "--no-comments", + "--no-tablespaces", + "--no-security-labels", + "-v", + shellEscape(remoteTempFile), + ]; + + const cmd = remoteEnv(env, `${pgRestoreBin} ${restoreArgs.join(" ")}`); + log(`Restoring database (SSH): ${targetDb}`, 'info', 'command', `pg_restore ${restoreArgs.join(' ')}`); + + const result = await ssh.exec(cmd); + + if (result.code !== 0 && result.code !== 1) { + throw new Error(`Remote pg_restore exited with code ${result.code}. 
Error: ${result.stderr}`); + } + + if (result.code === 1 && result.stderr.includes('warning')) { + log('Restore completed with warnings (non-fatal)', 'warning'); + } + + if (result.stderr) { + const lines = result.stderr.trim().split('\n'); + for (const line of lines) { + if (line && !line.includes('NOTICE:')) { + log(line, 'info'); + } + } + } + } finally { + // 3. Cleanup remote temp file + await ssh.exec(`rm -f ${shellEscape(remoteTempFile)}`).catch(() => {}); + ssh.end(); + } +} + export async function restore( config: PostgresRestoreConfig, sourcePath: string, diff --git a/src/lib/adapters/database/redis/connection.ts b/src/lib/adapters/database/redis/connection.ts index 7ee6aef..aba2350 100644 --- a/src/lib/adapters/database/redis/connection.ts +++ b/src/lib/adapters/database/redis/connection.ts @@ -3,6 +3,13 @@ import util from "util"; import { logger } from "@/lib/logger"; import { wrapError } from "@/lib/errors"; import { RedisConfig } from "@/lib/adapters/definitions"; +import { + SshClient, + isSSHMode, + extractSshConfig, + buildRedisArgs, + remoteBinaryCheck, +} from "@/lib/ssh"; const execFileAsync = util.promisify(execFile); const log = logger.child({ adapter: "redis", module: "connection" }); @@ -41,6 +48,44 @@ function buildConnectionArgs(config: RedisConfig): string[] { * Test connection to Redis server */ export async function test(config: RedisConfig): Promise<{ success: boolean; message: string; version?: string }> { + if (isSSHMode(config)) { + const sshConfig = extractSshConfig(config)!; + const ssh = new SshClient(); + try { + await ssh.connect(sshConfig); + const redisBin = await remoteBinaryCheck(ssh, "redis-cli"); + const args = buildRedisArgs(config); + + // TLS flag + if ((config as any).tls) args.push("--tls"); + // Database selection + if (config.database !== undefined && config.database !== 0) { + args.push("-n", String(config.database)); + } + + // Ping test + const pingResult = await ssh.exec(`${redisBin} ${args.join(" ")} PING`); 
+ if (pingResult.code !== 0 || !pingResult.stdout.includes("PONG")) { + return { success: false, message: `SSH Redis PING failed: ${pingResult.stderr || pingResult.stdout}` }; + } + + // Version info + const infoResult = await ssh.exec(`${redisBin} ${args.join(" ")} INFO server`); + let version: string | undefined; + if (infoResult.code === 0) { + const versionMatch = infoResult.stdout.match(/redis_version:([^\r\n]+)/); + version = versionMatch ? versionMatch[1].trim() : undefined; + } + + return { success: true, message: "Connection successful (via SSH)", version }; + } catch (error: unknown) { + const msg = error instanceof Error ? error.message : String(error); + return { success: false, message: `SSH connection failed: ${msg}` }; + } finally { + ssh.end(); + } + } + try { const args = buildConnectionArgs(config); @@ -83,6 +128,29 @@ export async function test(config: RedisConfig): Promise<{ success: boolean; mes * Note: Redis databases are always available, even if empty. */ export async function getDatabases(config: RedisConfig): Promise { + if (isSSHMode(config)) { + const sshConfig = extractSshConfig(config)!; + const ssh = new SshClient(); + try { + await ssh.connect(sshConfig); + const redisBin = await remoteBinaryCheck(ssh, "redis-cli"); + const args = buildRedisArgs(config); + + const result = await ssh.exec(`${redisBin} ${args.join(" ")} CONFIG GET databases`); + if (result.code === 0) { + const lines = result.stdout.trim().split("\n"); + const maxDbs = parseInt(lines[1] || "16", 10); + return Array.from({ length: maxDbs }, (_, i) => String(i)); + } + return Array.from({ length: 16 }, (_, i) => String(i)); + } catch (error: unknown) { + log.error("Failed to get databases via SSH", {}, wrapError(error)); + return Array.from({ length: 16 }, (_, i) => String(i)); + } finally { + ssh.end(); + } + } + try { const baseArgs = buildConnectionArgs({ ...config, database: 0 }); diff --git a/src/lib/adapters/database/redis/dump.ts 
b/src/lib/adapters/database/redis/dump.ts index d7ea8ff..3c9bc6c 100644 --- a/src/lib/adapters/database/redis/dump.ts +++ b/src/lib/adapters/database/redis/dump.ts @@ -2,8 +2,18 @@ import { BackupResult } from "@/lib/core/interfaces"; import { LogLevel, LogType } from "@/lib/core/logs"; import { spawn } from "child_process"; import fs from "fs/promises"; +import { createWriteStream } from "fs"; import { buildConnectionArgs } from "./connection"; import { RedisConfig } from "@/lib/adapters/definitions"; +import { + SshClient, + isSSHMode, + extractSshConfig, + buildRedisArgs, + remoteBinaryCheck, + shellEscape, +} from "@/lib/ssh"; +import { randomUUID } from "crypto"; /** * Dump Redis database using RDB snapshot @@ -19,6 +29,10 @@ export async function dump( onLog?: (msg: string, level?: LogLevel, type?: LogType, details?: string) => void, _onProgress?: (percentage: number) => void ): Promise { + if (isSSHMode(config)) { + return dumpSSH(config, destinationPath, onLog); + } + const startedAt = new Date(); const logs: string[] = []; @@ -98,3 +112,99 @@ export async function dump( }; } } + +/** + * SSH variant: run redis-cli --rdb on remote, then stream the file back. + * redis-cli --rdb writes to a file (not stdout), so we use a remote temp file. 
+ */ +async function dumpSSH( + config: RedisConfig, + destinationPath: string, + onLog?: (msg: string, level?: LogLevel, type?: LogType, details?: string) => void, +): Promise { + const startedAt = new Date(); + const logs: string[] = []; + const log = (msg: string, level: LogLevel = "info", type: LogType = "general", details?: string) => { + logs.push(msg); + if (onLog) onLog(msg, level, type, details); + }; + + const sshConfig = extractSshConfig(config)!; + const ssh = new SshClient(); + const remoteTempFile = `/tmp/dbackup_redis_${randomUUID()}.rdb`; + + try { + await ssh.connect(sshConfig); + const redisBin = await remoteBinaryCheck(ssh, "redis-cli"); + const args = buildRedisArgs(config); + + // TLS flag + if ((config as any).tls) args.push("--tls"); + // Database selection + if (config.database !== undefined && config.database !== 0) { + args.push("-n", String(config.database)); + } + + log("Starting Redis RDB backup (SSH)...", "info"); + + // 1. Run redis-cli --rdb on remote to create temp file + const rdbCmd = `${redisBin} ${args.join(" ")} --rdb ${shellEscape(remoteTempFile)}`; + log("Executing remote redis-cli --rdb", "info", "command", rdbCmd.replace(config.password || '___NONE___', '******')); + + const rdbResult = await ssh.exec(rdbCmd); + if (rdbResult.code !== 0) { + throw new Error(`Remote redis-cli --rdb failed: ${rdbResult.stderr}`); + } + + // 2. Stream remote file back to local + log("Streaming RDB file from remote...", "info"); + const writeStream = createWriteStream(destinationPath); + + await new Promise((resolve, reject) => { + ssh.execStream(`cat ${shellEscape(remoteTempFile)}`, (err, stream) => { + if (err) return reject(err); + + stream.pipe(writeStream); + + stream.on('exit', (code: number) => { + if (code === 0) resolve(); + else reject(new Error(`Failed to stream RDB from remote (code ${code})`)); + }); + + stream.on('error', (err: Error) => reject(err)); + writeStream.on('error', (err: Error) => reject(err)); + }); + }); + + // 3. 
Verify local file + const stats = await fs.stat(destinationPath); + if (stats.size === 0) { + throw new Error("RDB dump file is empty"); + } + + log(`RDB backup completed successfully via SSH (${stats.size} bytes)`, "success"); + + return { + success: true, + path: destinationPath, + size: stats.size, + logs, + startedAt, + completedAt: new Date(), + }; + } catch (error: unknown) { + const message = error instanceof Error ? error.message : String(error); + log(`Backup failed: ${message}`, "error"); + return { + success: false, + logs, + error: message, + startedAt, + completedAt: new Date(), + }; + } finally { + // Cleanup remote temp file + await ssh.exec(`rm -f ${shellEscape(remoteTempFile)}`).catch(() => {}); + ssh.end(); + } +} diff --git a/src/lib/adapters/database/sqlite/connection.ts b/src/lib/adapters/database/sqlite/connection.ts index 6eb2750..9ccd439 100644 --- a/src/lib/adapters/database/sqlite/connection.ts +++ b/src/lib/adapters/database/sqlite/connection.ts @@ -3,18 +3,10 @@ import fs from "fs/promises"; import { constants } from "fs"; import { execFile } from "child_process"; import { promisify } from "util"; -import { SshClient } from "./ssh-client"; +import { SshClient, shellEscape, extractSqliteSshConfig } from "@/lib/ssh"; const execFileAsync = promisify(execFile); -/** - * Escapes a value for safe inclusion in a single-quoted shell string. - * Handles embedded single quotes by ending the quote, adding an escaped quote, and re-opening. 
- */ -function shellEscape(value: string): string { - return "'" + value.replace(/'/g, "'\\''") + "'"; -} - export const test: DatabaseAdapter["test"] = async (config) => { try { const mode = config.mode || "local"; @@ -112,7 +104,9 @@ export const getDatabasesWithStats: DatabaseAdapter["getDatabasesWithStats"] = a } else if (mode === "ssh") { const client = new SshClient(); try { - await client.connect(config); + const sshConfig2 = extractSqliteSshConfig(config); + if (!sshConfig2) return [{ name, sizeInBytes: undefined, tableCount: undefined }]; + await client.connect(sshConfig2); // Get file size via stat const sizeResult = await client.exec(`stat -c %s ${shellEscape(dbPath)} 2>/dev/null || stat -f %z ${shellEscape(dbPath)} 2>/dev/null`); diff --git a/src/lib/adapters/database/sqlite/dump.ts b/src/lib/adapters/database/sqlite/dump.ts index 78d6338..0b709ea 100644 --- a/src/lib/adapters/database/sqlite/dump.ts +++ b/src/lib/adapters/database/sqlite/dump.ts @@ -1,16 +1,9 @@ import { DatabaseAdapter } from "@/lib/core/interfaces"; import { spawn } from "child_process"; import fs from "fs"; -import { SshClient } from "./ssh-client"; +import { SshClient, shellEscape, extractSqliteSshConfig } from "@/lib/ssh"; import { SQLiteConfig } from "@/lib/adapters/definitions"; -/** - * Escapes a value for safe inclusion in a single-quoted shell string. 
- */ -function shellEscape(value: string): string { - return "'" + value.replace(/'/g, "'\\''") + "'"; -} - export const dump: DatabaseAdapter["dump"] = async (config, destinationPath, onLog, onProgress) => { const startedAt = new Date(); const mode = config.mode || "local"; @@ -95,7 +88,9 @@ async function dumpSsh(config: SQLiteConfig, destinationPath: string, log: (msg: const binaryPath = config.sqliteBinaryPath || "sqlite3"; const dbPath = config.path; - await client.connect(config); + const sshConfig = extractSqliteSshConfig(config); + if (!sshConfig) throw new Error("SSH host and username are required"); + await client.connect(sshConfig); log("SSH connection established."); return new Promise((resolve, reject) => { @@ -114,7 +109,7 @@ async function dumpSsh(config: SQLiteConfig, destinationPath: string, log: (msg: log(`[Remote Stderr]: ${data.toString()}`); }); - stream.on("close", (code: number, _signal: any) => { + stream.on("exit", (code: number, _signal: any) => { client.end(); if (code === 0) { log("Remote dump completed successfully."); diff --git a/src/lib/adapters/database/sqlite/restore.ts b/src/lib/adapters/database/sqlite/restore.ts index 2fc119f..242b8d7 100644 --- a/src/lib/adapters/database/sqlite/restore.ts +++ b/src/lib/adapters/database/sqlite/restore.ts @@ -1,16 +1,9 @@ import { DatabaseAdapter } from "@/lib/core/interfaces"; import { spawn } from "child_process"; import fs from "fs"; -import { SshClient } from "./ssh-client"; +import { SshClient, shellEscape, extractSqliteSshConfig } from "@/lib/ssh"; import { SQLiteConfig } from "@/lib/adapters/definitions"; -/** - * Escapes a value for safe inclusion in a single-quoted shell string. 
- */ -function shellEscape(value: string): string { - return "'" + value.replace(/'/g, "'\\''") + "'"; -} - export const prepareRestore: DatabaseAdapter["prepareRestore"] = async (_config, _databases) => { // No major prep needed for SQLite mostly, but could check write permissions here }; @@ -118,7 +111,9 @@ async function restoreSsh(config: SQLiteConfig, sourcePath: string, log: (msg: s const binaryPath = config.sqliteBinaryPath || "sqlite3"; const dbPath = config.path; - await client.connect(config); + const sshConfig = extractSqliteSshConfig(config); + if (!sshConfig) throw new Error("SSH host and username are required"); + await client.connect(sshConfig); log("SSH connection established."); // Create remote backup and delete original @@ -156,7 +151,7 @@ async function restoreSsh(config: SQLiteConfig, sourcePath: string, log: (msg: s log(`[Remote Stderr]: ${data.toString()}`); }); - stream.on("close", (code: number, _signal: any) => { + stream.on("exit", (code: number, _signal: any) => { client.end(); if (code === 0) { log("Remote restore completed successfully."); diff --git a/src/lib/adapters/database/sqlite/ssh-client.ts b/src/lib/adapters/database/sqlite/ssh-client.ts index a366a01..4cc3161 100644 --- a/src/lib/adapters/database/sqlite/ssh-client.ts +++ b/src/lib/adapters/database/sqlite/ssh-client.ts @@ -1,73 +1,2 @@ -import { Client, ConnectConfig } from "ssh2"; -import { SQLiteConfig } from "@/lib/adapters/definitions"; - -export class SshClient { - private client: Client; - - constructor() { - this.client = new Client(); - } - - public connect(config: SQLiteConfig): Promise { - return new Promise((resolve, reject) => { - const sshConfig: ConnectConfig = { - host: config.host, - port: config.port, - username: config.username, - readyTimeout: 20000, - }; - - if (config.authType === 'privateKey') { - sshConfig.privateKey = config.privateKey; - if (config.passphrase) { - sshConfig.passphrase = config.passphrase; - } - } else if (config.authType === 'agent') 
{ - sshConfig.agent = process.env.SSH_AUTH_SOCK; - } else { - // Default to password - sshConfig.password = config.password; - } - - this.client - .on("ready", () => { - resolve(); - }) - .on("error", (err) => { - reject(err); - }) - .connect(sshConfig); - }); - } - - public exec(command: string): Promise<{ stdout: string; stderr: string; code: number }> { - return new Promise((resolve, reject) => { - this.client.exec(command, (err, stream) => { - if (err) return reject(err); - - let stdout = ""; - let stderr = ""; - - stream - .on("close", (code: number, _signal: any) => { - resolve({ stdout, stderr, code }); - }) - .on("data", (data: any) => { - stdout += data.toString(); - }) - .stderr.on("data", (data: any) => { - stderr += data.toString(); - }); - }); - }); - } - - // Returns the raw stream for piping - public execStream(command: string, callback: (err: Error | undefined, stream: any) => void): void { - this.client.exec(command, callback); - } - - public end() { - this.client.end(); - } -} +// Re-export shared SSH client for backwards compatibility +export { SshClient } from "@/lib/ssh"; diff --git a/src/lib/adapters/definitions.ts b/src/lib/adapters/definitions.ts index 0adc7e3..94ab100 100644 --- a/src/lib/adapters/definitions.ts +++ b/src/lib/adapters/definitions.ts @@ -19,6 +19,18 @@ const safeBinaryPath = z.string().regex( "Binary path may only contain letters, digits, slashes, underscores, hyphens, and dots" ); +// Shared SSH fields for adapters that support SSH remote execution mode +const sshFields = { + connectionMode: z.enum(["direct", "ssh"]).default("direct").describe("Connection mode (direct TCP or via SSH)"), + sshHost: z.string().optional().describe("SSH host"), + sshPort: z.coerce.number().default(22).optional().describe("SSH port"), + sshUsername: z.string().optional().describe("SSH username"), + sshAuthType: z.enum(["password", "privateKey", "agent"]).default("password").optional().describe("SSH authentication method"), + sshPassword: 
z.string().optional().describe("SSH password"), + sshPrivateKey: z.string().optional().describe("SSH private key (PEM format)"), + sshPassphrase: z.string().optional().describe("Passphrase for SSH private key"), +}; + export const MySQLSchema = z.object({ host: z.string().default("localhost"), port: z.coerce.number().default(3306), @@ -27,6 +39,7 @@ export const MySQLSchema = z.object({ database: z.union([z.string(), z.array(z.string())]).default(""), options: z.string().optional().describe("Additional mysqldump options"), disableSsl: z.boolean().default(false).describe("Disable SSL (Use for self-signed development DBs)"), + ...sshFields, }); export const MariaDBSchema = z.object({ @@ -37,6 +50,7 @@ export const MariaDBSchema = z.object({ database: z.union([z.string(), z.array(z.string())]).default(""), options: z.string().optional().describe("Additional mariadb-dump options"), disableSsl: z.boolean().default(false).describe("Disable SSL (Use for self-signed development DBs)"), + ...sshFields, }); export const PostgresSchema = z.object({ @@ -46,6 +60,7 @@ export const PostgresSchema = z.object({ password: z.string().optional(), database: z.union([z.string(), z.array(z.string())]).default(""), options: z.string().optional().describe("Additional pg_dump options"), + ...sshFields, }); export const MongoDBSchema = z.object({ @@ -57,6 +72,7 @@ export const MongoDBSchema = z.object({ authenticationDatabase: z.string().default("admin").optional(), database: z.union([z.string(), z.array(z.string())]).default(""), options: z.string().optional().describe("Additional mongodump options"), + ...sshFields, }); export const SQLiteSchema = z.object({ @@ -109,6 +125,7 @@ export const RedisSchema = z.object({ sentinelMasterName: z.string().optional().describe("Master name for Sentinel mode"), sentinelNodes: z.string().optional().describe("Comma-separated sentinel nodes (host:port,host:port)"), options: z.string().optional().describe("Additional redis-cli options"), + ...sshFields, 
}); export const LocalStorageSchema = z.object({ diff --git a/src/lib/ssh/index.ts b/src/lib/ssh/index.ts new file mode 100644 index 0000000..3d6c983 --- /dev/null +++ b/src/lib/ssh/index.ts @@ -0,0 +1,14 @@ +export { SshClient } from "./ssh-client"; +export type { SshConnectionConfig } from "./ssh-client"; +export { + shellEscape, + remoteEnv, + remoteBinaryCheck, + isSSHMode, + extractSshConfig, + extractSqliteSshConfig, + buildMysqlArgs, + buildPsqlArgs, + buildMongoArgs, + buildRedisArgs, +} from "./utils"; diff --git a/src/lib/ssh/ssh-client.ts b/src/lib/ssh/ssh-client.ts new file mode 100644 index 0000000..e6cdedd --- /dev/null +++ b/src/lib/ssh/ssh-client.ts @@ -0,0 +1,94 @@ +import { Client, ConnectConfig } from "ssh2"; + +/** + * Generic SSH connection configuration used across all adapters. + */ +export interface SshConnectionConfig { + host: string; + port?: number; + username: string; + authType: "password" | "privateKey" | "agent"; + password?: string; + privateKey?: string; + passphrase?: string; +} + +/** + * Generic SSH client for remote command execution over SSH2. + * Extracted from the SQLite adapter for shared use across all database adapters. + */ +export class SshClient { + private client: Client; + + constructor() { + this.client = new Client(); + } + + public connect(config: SshConnectionConfig): Promise { + return new Promise((resolve, reject) => { + const sshConfig: ConnectConfig = { + host: config.host, + port: config.port ?? 
22, + username: config.username, + readyTimeout: 20000, + keepaliveInterval: 10000, + keepaliveCountMax: 3, + }; + + if (config.authType === "privateKey") { + sshConfig.privateKey = config.privateKey; + if (config.passphrase) { + sshConfig.passphrase = config.passphrase; + } + } else if (config.authType === "agent") { + sshConfig.agent = process.env.SSH_AUTH_SOCK; + } else { + // Default to password + sshConfig.password = config.password; + } + + this.client + .on("ready", () => { + resolve(); + }) + .on("error", (err) => { + reject(err); + }) + .connect(sshConfig); + }); + } + + public exec(command: string): Promise<{ stdout: string; stderr: string; code: number }> { + return new Promise((resolve, reject) => { + this.client.exec(command, (err, stream) => { + if (err) return reject(err); + + let stdout = ""; + let stderr = ""; + + stream + .on("close", (code: number, _signal: any) => { + resolve({ stdout, stderr, code }); + }) + .on("data", (data: any) => { + stdout += data.toString(); + }) + .stderr.on("data", (data: any) => { + stderr += data.toString(); + }); + }); + }); + } + + /** + * Returns the raw SSH stream for piping (binary-safe). + * Use for streaming dump output or piping restore input. + */ + public execStream(command: string, callback: (err: Error | undefined, stream: any) => void): void { + this.client.exec(command, callback); + } + + public end(): void { + this.client.end(); + } +} diff --git a/src/lib/ssh/utils.ts b/src/lib/ssh/utils.ts new file mode 100644 index 0000000..3b43a59 --- /dev/null +++ b/src/lib/ssh/utils.ts @@ -0,0 +1,168 @@ +import { SshClient, SshConnectionConfig } from "./ssh-client"; + +/** + * Escapes a value for safe inclusion in a single-quoted shell string. + * Handles embedded single quotes by ending the quote, adding an escaped quote, and re-opening. 
+ */ +export function shellEscape(value: string): string { + return "'" + value.replace(/'/g, "'\\''") + "'"; +} + +/** + * Build a remote command string with environment variables prepended. + * Variables are set inline so they don't leak into the shell history. + * + * Example: remoteEnv({ MYSQL_PWD: "secret" }, "mysqldump -h 127.0.0.1 mydb") + * โ†’ "MYSQL_PWD='secret' mysqldump -h 127.0.0.1 mydb" + */ +export function remoteEnv(vars: Record, command: string): string { + const parts: string[] = []; + for (const [key, value] of Object.entries(vars)) { + if (value !== undefined && value !== "") { + parts.push(`${key}=${shellEscape(value)}`); + } + } + if (parts.length === 0) return command; + return `${parts.join(" ")} ${command}`; +} + +/** + * Check if a binary is available on the remote server. + * Returns the resolved path or throws if not found. + */ +export async function remoteBinaryCheck( + client: SshClient, + ...candidates: string[] +): Promise { + for (const binary of candidates) { + const result = await client.exec(`command -v ${shellEscape(binary)} 2>/dev/null`); + if (result.code === 0 && result.stdout.trim()) { + return result.stdout.trim(); + } + } + throw new Error( + `Required binary not found on remote server. Tried: ${candidates.join(", ")}` + ); +} + +/** + * Check if an adapter config has SSH mode enabled. + * Works for configs with `connectionMode: "ssh"` field. + */ +export function isSSHMode(config: Record): boolean { + return config.connectionMode === "ssh"; +} + +/** + * Extract SSH connection config from an adapter config that uses + * the shared sshHost/sshPort/sshUsername/... field convention. + * Returns null if SSH mode is not enabled. + */ +export function extractSshConfig(config: Record): SshConnectionConfig | null { + if (!isSSHMode(config)) return null; + if (!config.sshHost || !config.sshUsername) return null; + + return { + host: config.sshHost, + port: config.sshPort ?? 
22, + username: config.sshUsername, + authType: config.sshAuthType ?? "password", + password: config.sshPassword, + privateKey: config.sshPrivateKey, + passphrase: config.sshPassphrase, + }; +} + +/** + * Extract SSH connection config from a SQLite adapter config. + * SQLite uses direct field names (host, username, etc.) instead of the sshHost prefix convention. + */ +export function extractSqliteSshConfig(config: Record): SshConnectionConfig | null { + if (config.mode !== "ssh") return null; + if (!config.host || !config.username) return null; + + return { + host: config.host, + port: config.port ?? 22, + username: config.username, + authType: config.authType ?? "password", + password: config.password, + privateKey: config.privateKey, + passphrase: config.passphrase, + }; +} + +/** + * Build MySQL/MariaDB connection arguments for remote execution. + * Uses the DB host/port from the adapter config (connection within the SSH session). + */ +export function buildMysqlArgs(config: Record, user?: string): string[] { + const args = [ + "-h", shellEscape(config.host || "127.0.0.1"), + "-P", String(config.port || 3306), + "-u", shellEscape(user || config.user), + "--protocol=tcp", + ]; + if (config.disableSsl) { + args.push("--skip-ssl"); + } + return args; +} + +/** + * Build PostgreSQL connection arguments for remote execution. + */ +export function buildPsqlArgs(config: Record, user?: string): string[] { + return [ + "-h", shellEscape(config.host || "127.0.0.1"), + "-p", String(config.port || 5432), + "-U", shellEscape(user || config.user), + ]; +} + +/** + * Build MongoDB connection arguments for remote execution via mongosh/mongodump/mongorestore. 
+ */ +export function buildMongoArgs(config: Record): string[] { + if (config.uri) { + return [`--uri=${shellEscape(config.uri)}`]; + } + + const args = [ + "--host", shellEscape(config.host || "127.0.0.1"), + "--port", String(config.port || 27017), + ]; + + if (config.user && config.password) { + args.push("--username", shellEscape(config.user)); + args.push("--password", shellEscape(config.password)); + args.push("--authenticationDatabase", shellEscape(config.authenticationDatabase || "admin")); + } + + return args; +} + +/** + * Build Redis connection arguments for remote execution. + */ +export function buildRedisArgs(config: Record): string[] { + const args = [ + "-h", shellEscape(config.host || "127.0.0.1"), + "-p", String(config.port || 6379), + ]; + + if (config.username) { + args.push("--user", shellEscape(config.username)); + } + if (config.password) { + args.push("-a", shellEscape(config.password)); + } + if (config.tls) { + args.push("--tls"); + } + if (config.database !== undefined && config.database !== 0) { + args.push("-n", String(config.database)); + } + + return args; +} diff --git a/wiki/changelog.md b/wiki/changelog.md index 37d5973..e447158 100644 --- a/wiki/changelog.md +++ b/wiki/changelog.md @@ -5,6 +5,17 @@ All notable changes to DBackup are documented here. 
## v1.3.0 *Release: In Progress* +### โœจ Features + +- **ssh**: SSH remote execution mode for MySQL, MariaDB, PostgreSQL, MongoDB and Redis - database tools (mysqldump, pg_dump, mongodump, redis-cli) run directly on the remote host via SSH instead of requiring a local client or SSH tunnel +- **ssh**: New shared SSH infrastructure (`src/lib/ssh/`) with reusable client, shell escaping, remote binary detection, and per-adapter argument builders +- **ssh**: Generic SSH connection test endpoint - "Test SSH" button now works for all SSH-capable adapters, not just MSSQL +- **ui**: SSH configuration tab in the source editor for all SSH-capable database adapters (MySQL, MariaDB, PostgreSQL, MongoDB, Redis) with connection mode selector + +### ๐ŸŽจ Improvements + +- **sqlite**: Refactored SQLite SSH client into shared SSH module for code reuse across all database adapters + ### ๐Ÿณ Docker - **Image**: `skyfay/dbackup:v1.3.0` From 48ca4f46c2049f9f700e69e3605039b83c303c5a Mon Sep 17 00:00:00 2001 From: Manu Date: Sat, 28 Mar 2026 13:48:30 +0100 Subject: [PATCH 03/21] Show connectionMode selector and SSH-aware tabs Add support for adapters with a connectionMode field: render a config.connectionMode SchemaField in the adapter form (except for sqlite) and introduce an SshAwareTabLayout that displays the connection mode selector first and then shows contextual tabs (SSH / Connection / Configuration) based on the selected mode. This forces remount on mode change so the active tab resets correctly. Also update changelog and add a small import adjustment. 
--- src/components/adapter/adapter-form.tsx | 10 ++ src/components/adapter/form-sections.tsx | 147 +++++++++++++++++++++-- wiki/changelog.md | 1 + 3 files changed, 146 insertions(+), 12 deletions(-) diff --git a/src/components/adapter/adapter-form.tsx b/src/components/adapter/adapter-form.tsx index 29fcb06..321611f 100644 --- a/src/components/adapter/adapter-form.tsx +++ b/src/components/adapter/adapter-form.tsx @@ -273,6 +273,16 @@ export function AdapterForm({ type, adapters, onSuccess, initialData, onBack }: />
)} + {selectedAdapterId !== 'sqlite' && selectedAdapter && (selectedAdapter.configSchema as any).shape?.connectionMode && ( +
+ +
+ )}
diff --git a/src/components/adapter/form-sections.tsx b/src/components/adapter/form-sections.tsx index 230aa1e..c77e67c 100644 --- a/src/components/adapter/form-sections.tsx +++ b/src/components/adapter/form-sections.tsx @@ -1,5 +1,5 @@ import { useFormContext } from "react-hook-form"; -import { useState } from "react"; +import { useState, useEffect } from "react"; import { toast } from "sonner"; import { Tabs, @@ -167,20 +167,43 @@ export function DatabaseFormContent({ // Adapters that support SSH connection mode (have connectionMode field in schema) const hasSSH = adapter.configSchema.shape && "connectionMode" in adapter.configSchema.shape && !isMSSQL; - const showTabs = isMSSQL || hasSSH; - const tabCount = 2 + (isMSSQL ? 1 : 0) + (hasSSH ? 1 : 0); + + // SSH-capable adapters: show mode selector first, then contextual tabs + if (hasSSH) { + const isSSH = connectionMode === "ssh"; + const defaultTab = isSSH ? "ssh" : "connection"; + + // Before mode is selected, show nothing (selector is in the parent form) + if (!connectionMode) { + return null; + } + + return ( + + ); + } + + // MSSQL and adapters without SSH support + const tabCount = 2 + (isMSSQL ? 1 : 0); return ( Connection Configuration {isMSSQL && File Transfer} - {hasSSH && SSH} @@ -246,16 +269,116 @@ export function DatabaseFormContent({ )} )} + + ); +} - {hasSSH && ( - - - {connectionMode === "ssh" && ( +/** + * Tab layout for SSH-capable adapters. Uses key={connectionMode} to force remount on mode change, + * ensuring the active tab resets to the first tab. + */ +function SshAwareTabLayout({ + isSSH, + defaultTab, + adapter, + sshAuthType, + detectedVersion, + healthNotificationsDisabled, + onHealthNotificationsDisabledChange, +}: { + isSSH: boolean; + defaultTab: string; + adapter: AdapterDefinition; + sshAuthType: string; + detectedVersion?: string | null; + healthNotificationsDisabled?: boolean; + onHealthNotificationsDisabledChange?: (disabled: boolean) => void; +}) { + return ( +
+ {detectedVersion && ( +
+ + + Detected: {detectedVersion} + +
+ )} + + {isSSH ? ( + + + SSH Connection + Database + Configuration + + + - )} - + + + +

+ Database connection as seen from the SSH host (e.g. 127.0.0.1 if the database runs on the same server). +

+ +
+ + + {adapter.id === 'redis' && } + + {onHealthNotificationsDisabledChange && ( + + )} + +
+ ) : ( + + + Connection + Configuration + + + + + + + + {adapter.id === 'redis' && } + + {onHealthNotificationsDisabledChange && ( + + )} + + )} - +
); } diff --git a/wiki/changelog.md b/wiki/changelog.md index e447158..04759f5 100644 --- a/wiki/changelog.md +++ b/wiki/changelog.md @@ -14,6 +14,7 @@ All notable changes to DBackup are documented here. ### ๐ŸŽจ Improvements +- **ui**: Redesigned source form for SSH-capable adapters โ€” Connection Mode selector now appears first (like SQLite), SSH Connection tab is shown first in SSH mode so users configure SSH before database credentials - **sqlite**: Refactored SQLite SSH client into shared SSH module for code reuse across all database adapters ### ๐Ÿณ Docker From 044467567a29e7f99e0bda9a417ffec9e5e29b33 Mon Sep 17 00:00:00 2001 From: Manu Date: Sat, 28 Mar 2026 13:56:43 +0100 Subject: [PATCH 04/21] Left-align detected version badge Update alignment of the "Detected: {version}" badge in src/components/adapter/form-sections.tsx by replacing `justify-end` with `justify-start` in DatabaseFormContent and SshAwareTabLayout so the badge is left-aligned. --- src/components/adapter/form-sections.tsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/components/adapter/form-sections.tsx b/src/components/adapter/form-sections.tsx index c77e67c..54b4ad5 100644 --- a/src/components/adapter/form-sections.tsx +++ b/src/components/adapter/form-sections.tsx @@ -89,7 +89,7 @@ export function DatabaseFormContent({ return (
{detectedVersion && ( -
+
Detected: {detectedVersion} @@ -297,7 +297,7 @@ function SshAwareTabLayout({ return (
{detectedVersion && ( -
+
Detected: {detectedVersion} From fb56c358b9e2752d5e14920f1154c8e950ba3d0e Mon Sep 17 00:00:00 2001 From: Manu Date: Sat, 28 Mar 2026 14:02:03 +0100 Subject: [PATCH 05/21] Dump all databases when none specified When no database is provided, the dump function no longer throws immediately. It now imports getDatabases, logs that it's backing up all databases, fetches the database list from the server and logs the found DBs; an error is still thrown if no databases are found. This enables backing up the entire MySQL/MariaDB server when no specific database is given. --- src/lib/adapters/database/mysql/dump.ts | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/lib/adapters/database/mysql/dump.ts b/src/lib/adapters/database/mysql/dump.ts index cb82032..f02b212 100644 --- a/src/lib/adapters/database/mysql/dump.ts +++ b/src/lib/adapters/database/mysql/dump.ts @@ -3,6 +3,7 @@ import { LogLevel, LogType } from "@/lib/core/logs"; import { MySQLConfig, MariaDBConfig } from "@/lib/adapters/definitions"; import { getDialect } from "./dialects"; import { getMysqldumpCommand } from "./tools"; +import { getDatabases } from "./connection"; import fs from "fs/promises"; import path from "path"; import { spawn } from "child_process"; @@ -163,7 +164,13 @@ export async function dump(config: MySQLDumpConfig, destinationPath: string, onL else if (config.database) dbs = [config.database]; if (dbs.length === 0) { - throw new Error("No database specified for backup"); + log("No databases selected โ€” backing up all databases"); + dbs = await getDatabases(config); + log(`Found ${dbs.length} database(s): ${dbs.join(', ')}`); + } + + if (dbs.length === 0) { + throw new Error("No databases found on server"); } // Single DB: Direct dump (no TAR needed) From 497d5aa1db2ea000e34ca8e49d39d0450822512d Mon Sep 17 00:00:00 2001 From: Manu Date: Sat, 28 Mar 2026 14:31:19 +0100 Subject: [PATCH 06/21] Add SSH mode docs & adapter polling Document SSH remote-exec connection mode 
across the project and add silent polling for adapter health. - README and site index: add "Connection Modes" column and note SSH/Direct support per database. - User & developer docs: extensive SSH mode docs added for MySQL, MariaDB, PostgreSQL, MongoDB, Redis, SQLite and core adapter/architecture docs (fields, prerequisites, tooling, examples, and SSH architecture). - src: AdapterManager now performs a silent background refresh (every 10s) to keep adapter health/status up to date without showing spinners or error toasts. - Changelog: note UI auto-refresh, SSH docs, and a MySQL bugfix for jobs with no DB selected. These changes add documentation and UX improvements for SSH-based backup workflows and keep source/destination health indicators fresher in the UI. --- README.md | 18 +-- src/components/adapter/adapter-manager.tsx | 19 +++ wiki/changelog.md | 12 ++ wiki/developer-guide/adapters/database.md | 167 +++++++++++++++++++-- wiki/developer-guide/architecture.md | 24 ++- wiki/developer-guide/core/adapters.md | 5 + wiki/developer-guide/index.md | 1 + wiki/index.md | 18 +-- wiki/user-guide/sources/index.md | 52 ++++++- wiki/user-guide/sources/mongodb.md | 146 +++++++++++++++--- wiki/user-guide/sources/mysql.md | 149 +++++++++++++++++- wiki/user-guide/sources/postgresql.md | 134 ++++++++++++++--- wiki/user-guide/sources/redis.md | 106 ++++++++++--- 13 files changed, 748 insertions(+), 103 deletions(-) diff --git a/README.md b/README.md index 64d388b..cf27f04 100644 --- a/README.md +++ b/README.md @@ -150,15 +150,15 @@ Open [https://localhost:3000](https://localhost:3000) and create your admin acco ## ๐Ÿ—„๏ธ Supported Databases -| Database | Versions | -| :--- | :--- | -| PostgreSQL | 12 โ€“ 18 | -| MySQL | 5.7, 8, 9 | -| MariaDB | 10, 11 | -| MongoDB | 4 โ€“ 8 | -| Redis | 6.x, 7.x, 8.x | -| SQLite | 3.x (Local & SSH) | -| Microsoft SQL Server | 2017, 2019, 2022 | +| Database | Versions | Connection Modes | +| :--- | :--- | :--- | +| PostgreSQL | 12 โ€“ 18 | Direct, 
SSH | +| MySQL | 5.7, 8, 9 | Direct, SSH | +| MariaDB | 10, 11 | Direct, SSH | +| MongoDB | 4 โ€“ 8 | Direct, SSH | +| Redis | 6.x, 7.x, 8.x | Direct, SSH | +| SQLite | 3.x | Local, SSH | +| Microsoft SQL Server | 2017, 2019, 2022 | Direct (+ SSH for file transfer) | ## โ˜๏ธ Supported Destinations diff --git a/src/components/adapter/adapter-manager.tsx b/src/components/adapter/adapter-manager.tsx index 1bc27c4..b752167 100644 --- a/src/components/adapter/adapter-manager.tsx +++ b/src/components/adapter/adapter-manager.tsx @@ -55,12 +55,31 @@ export function AdapterManager({ type, title, description, canManage = true, per } }, [type]); + // Silent polling refresh (no loading spinner, no error toasts) + const silentRefresh = useCallback(async () => { + try { + const res = await fetch(`/api/adapters?type=${type}`); + if (res.ok) { + const data = await res.json(); + setConfigs(data); + } + } catch { + // Silent โ€” don't disturb the user on background poll failures + } + }, [type]); + useEffect(() => { // Filter definitions by type setAvailableAdapters(ADAPTER_DEFINITIONS.filter(d => d.type === type)); fetchConfigs(); }, [type, fetchConfigs]); + // Poll every 10 seconds to keep health status up to date + useEffect(() => { + const interval = setInterval(silentRefresh, 10000); + return () => clearInterval(interval); + }, [silentRefresh]); + const handleDelete = (id: string) => { setDeletingId(id); }; diff --git a/wiki/changelog.md b/wiki/changelog.md index 04759f5..c33b310 100644 --- a/wiki/changelog.md +++ b/wiki/changelog.md @@ -12,11 +12,23 @@ All notable changes to DBackup are documented here. 
- **ssh**: Generic SSH connection test endpoint - "Test SSH" button now works for all SSH-capable adapters, not just MSSQL - **ui**: SSH configuration tab in the source editor for all SSH-capable database adapters (MySQL, MariaDB, PostgreSQL, MongoDB, Redis) with connection mode selector +### ๐Ÿ› Bug Fixes + +- **mysql**: Backup jobs with no database selected now auto-discover all databases instead of failing with "No database specified" + ### ๐ŸŽจ Improvements - **ui**: Redesigned source form for SSH-capable adapters โ€” Connection Mode selector now appears first (like SQLite), SSH Connection tab is shown first in SSH mode so users configure SSH before database credentials +- **ui**: Sources and Destinations pages now auto-refresh every 10 seconds to keep health status up to date - **sqlite**: Refactored SQLite SSH client into shared SSH module for code reuse across all database adapters +### ๐Ÿ“ Documentation + +- **wiki**: Updated all database source guides (MySQL, MariaDB, PostgreSQL, MongoDB, Redis) with SSH mode configuration, prerequisites, setup guides, and troubleshooting +- **wiki**: New "Connection Modes" overview section on the Sources index page explaining Direct vs SSH mode and shared SSH config fields +- **wiki**: Added SSH remote execution architecture section to the Developer Guide (database adapters, adapter system, architecture) +- **wiki**: Each adapter guide now lists required CLI tools for the remote host with installation commands per OS + ### ๐Ÿณ Docker - **Image**: `skyfay/dbackup:v1.3.0` diff --git a/wiki/developer-guide/adapters/database.md b/wiki/developer-guide/adapters/database.md index ddfff83..57ed0c3 100644 --- a/wiki/developer-guide/adapters/database.md +++ b/wiki/developer-guide/adapters/database.md @@ -4,15 +4,15 @@ Database adapters handle the dump and restore operations for different database ## Available Adapters -| Adapter | ID | CLI Tools Required | File Extension | -| :--- | :--- | :--- | :--- | -| MySQL | `mysql` | 
`mysql`, `mysqldump` | `.sql` | -| MariaDB | `mariadb` | `mysql`, `mysqldump` | `.sql` | -| PostgreSQL | `postgres` | `psql`, `pg_dump`, `pg_restore` | `.sql` | -| MongoDB | `mongodb` | `mongodump`, `mongorestore` | `.archive` | -| SQLite | `sqlite` | None (file copy) | `.db` | -| MSSQL | `mssql` | None (TDS protocol) | `.bak` | -| Redis | `redis` | `redis-cli` | `.rdb` | +| Adapter | ID | CLI Tools Required | SSH Mode | File Extension | +| :--- | :--- | :--- | :--- | :--- | +| MySQL | `mysql` | `mysql`, `mysqldump` | โœ… | `.sql` | +| MariaDB | `mariadb` | `mysql`, `mysqldump` | โœ… | `.sql` | +| PostgreSQL | `postgres` | `psql`, `pg_dump`, `pg_restore` | โœ… | `.sql` | +| MongoDB | `mongodb` | `mongodump`, `mongorestore` | โœ… | `.archive` | +| SQLite | `sqlite` | None (file copy) | โœ… | `.db` | +| MSSQL | `mssql` | None (TDS protocol) | โŒ (uses SFTP) | `.bak` | +| Redis | `redis` | `redis-cli` | โœ… | `.rdb` | ## Backup File Extensions @@ -137,6 +137,154 @@ Returns: If `getDatabasesWithStats()` is not implemented, falls back to `getDatabases()` and returns names only (without size/table count). +## SSH Mode Architecture + +Most database adapters support an SSH remote execution mode. Instead of running CLI tools locally and connecting to the database over TCP, DBackup connects via SSH to the target server and runs database tools **remotely**. This is **not** an SSH tunnel โ€” the dump/restore commands execute on the remote host. 
+ +### Shared SSH Infrastructure (`src/lib/ssh/`) + +``` +src/lib/ssh/ +โ”œโ”€โ”€ index.ts # Re-exports +โ”œโ”€โ”€ ssh-client.ts # SshClient class (connect, exec, execStream, end) +โ””โ”€โ”€ utils.ts # shellEscape, remoteEnv, remoteBinaryCheck, extractSshConfig, arg builders +``` + +#### `SshClient` + +Generic SSH2 client used by all adapters: + +```typescript +import { SshClient, SshConnectionConfig } from "@/lib/ssh"; + +const client = new SshClient(); +await client.connect(sshConfig); + +// Simple command execution (buffered) +const result = await client.exec("mysqldump --version"); +// { stdout: "...", stderr: "...", code: 0 } + +// Streaming execution (for dumps โ€” pipes stdout to a writable stream) +const stream = await client.execStream("pg_dump -F c mydb"); +stream.pipe(outputFile); + +client.end(); +``` + +Configuration: `readyTimeout: 20000ms`, `keepaliveInterval: 10000ms`, `keepaliveCountMax: 3`. + +#### Shared Utilities + +| Function | Purpose | +| :--- | :--- | +| `shellEscape(value)` | Wraps value in single quotes, escapes embedded quotes | +| `remoteEnv(vars, cmd)` | Prepends env vars to a command (e.g., `MYSQL_PWD='...' 
mysqldump`) | +| `remoteBinaryCheck(client, ...candidates)` | Checks if binary exists on remote host, returns resolved path | +| `isSSHMode(config)` | Returns `true` if `config.connectionMode === "ssh"` | +| `extractSshConfig(config)` | Extracts `SshConnectionConfig` from adapter config with `sshHost` prefix | +| `extractSqliteSshConfig(config)` | Same for SQLite (uses `host` instead of `sshHost`) | +| `buildMysqlArgs(config)` | Builds MySQL CLI args from adapter config | +| `buildPsqlArgs(config)` | Builds PostgreSQL CLI args | +| `buildMongoArgs(config)` | Builds MongoDB CLI args | +| `buildRedisArgs(config)` | Builds Redis CLI args | + +#### Shared SSH Config Fields (`sshFields`) + +All SSH-capable schemas spread the shared `sshFields` object from `definitions.ts`: + +```typescript +const sshFields = { + connectionMode: z.enum(["direct", "ssh"]).default("direct"), + sshHost: z.string().optional(), + sshPort: z.coerce.number().default(22).optional(), + sshUsername: z.string().optional(), + sshAuthType: z.enum(["password", "privateKey", "agent"]).default("password").optional(), + sshPassword: z.string().optional(), + sshPrivateKey: z.string().optional(), + sshPassphrase: z.string().optional(), +}; + +// Usage in schema: +export const MySQLSchema = z.object({ + host: z.string().default("localhost"), + // ... database fields ... 
+ ...sshFields, +}); +``` + +### Adding SSH Mode to an Adapter + +Each adapter operation (`dump`, `restore`, `test`, `getDatabases`) checks for SSH mode and branches: + +```typescript +import { isSSHMode, extractSshConfig } from "@/lib/ssh"; + +async dump(config, destinationPath, onLog) { + const sshConfig = extractSshConfig(config); + + if (sshConfig) { + return dumpViaSSH(config, sshConfig, destinationPath, onLog); + } + return dumpDirect(config, destinationPath, onLog); +} +``` + +#### SSH Dump Pattern + +```typescript +async function dumpViaSSH(config, sshConfig, destPath, onLog) { + const client = new SshClient(); + try { + await client.connect(sshConfig); + + // 1. Check binary availability + const binary = await remoteBinaryCheck(client, "mysqldump", "mariadb-dump"); + + // 2. Build command with argument builder + const args = buildMysqlArgs(config); + const cmd = remoteEnv( + { MYSQL_PWD: config.password }, + `${binary} ${args.join(" ")} --single-transaction ${shellEscape(config.database)}` + ); + + // 3. Stream output to local file + const stream = await client.execStream(cmd); + const output = createWriteStream(destPath); + stream.pipe(output); + + await new Promise((resolve, reject) => { + stream.on("exit", (code) => code === 0 ? resolve() : reject()); + stream.on("error", reject); + }); + } finally { + client.end(); + } +} +``` + +::: warning Event Handler +Always use `stream.on("exit", ...)` instead of `stream.on("close", ...)` for SSH2 exec streams. The `close` event does not fire reliably when piping stdin/stdout through SSH channels. +::: + +### Test SSH Endpoint + +`POST /api/adapters/test-ssh` provides a generic SSH connectivity test: + +```json +{ + "adapterId": "mysql", + "config": { + "sshHost": "192.168.1.10", + "sshPort": 22, + "sshUsername": "deploy", + "sshAuthType": "password", + "sshPassword": "..." + } +} +``` + +For non-MSSQL adapters, runs `echo "SSH connection test"`. For MSSQL, tests SFTP access to the backup path. 
+ ## MySQL Adapter ### Configuration Schema @@ -150,6 +298,7 @@ const MySQLSchema = z.object({ database: z.union([z.string(), z.array(z.string())]).default(""), options: z.string().optional().describe("Additional mysqldump options"), disableSsl: z.boolean().default(false).describe("Disable SSL"), + ...sshFields, }); ``` diff --git a/wiki/developer-guide/architecture.md b/wiki/developer-guide/architecture.md index d9b1d28..9e142a4 100644 --- a/wiki/developer-guide/architecture.md +++ b/wiki/developer-guide/architecture.md @@ -41,14 +41,26 @@ DBackup follows a strictly layered architecture to decouple the UI from business โ”‚ Adapters โ”‚ โ”‚ Adapters โ”‚ โ”‚ Adapters โ”‚ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ โ”‚ โ”‚ - โ–ผ โ–ผ โ–ผ - โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” - โ”‚ MySQL โ”‚ โ”‚ S3 โ”‚ โ”‚ Discord โ”‚ - โ”‚PostgreSQLโ”‚ โ”‚ SFTP โ”‚ โ”‚ Email โ”‚ - โ”‚ MongoDB โ”‚ โ”‚ Local โ”‚ โ”‚ โ”‚ - โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”Œโ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ” โ”‚ โ”‚ + โ–ผ โ–ผ โ–ผ โ–ผ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚ Direct โ”‚ โ”‚ SSH โ”‚ โ”‚ S3 โ”‚ โ”‚ Discord โ”‚ + โ”‚ (TCP) โ”‚ โ”‚(Exec)โ”‚ โ”‚ SFTP โ”‚ โ”‚ Email โ”‚ + โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ Local โ”‚ โ”‚ โ”‚ + โ”‚ MySQL โ”‚ โ”‚MySQL โ”‚ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚PostgreSQLโ”‚ โ”‚PG โ”‚ + โ”‚ MongoDB โ”‚ โ”‚Mongo โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ ``` +### SSH Remote Execution + +Database adapters support two connection modes: +- **Direct**: CLI tools run locally on the DBackup server, connecting to the database via TCP +- **SSH**: CLI tools run remotely on the target server via SSH exec (not 
tunneling) + +SSH mode uses a shared infrastructure (`src/lib/ssh/`) with `SshClient`, `shellEscape`, `remoteBinaryCheck`, and per-adapter argument builders. See [Database Adapters](/developer-guide/adapters/database#ssh-mode-architecture) for implementation details. + ## Four-Layer Architecture ### 1. App Router Layer (`src/app`) diff --git a/wiki/developer-guide/core/adapters.md b/wiki/developer-guide/core/adapters.md index 2454ee9..7376fea 100644 --- a/wiki/developer-guide/core/adapters.md +++ b/wiki/developer-guide/core/adapters.md @@ -13,6 +13,11 @@ src/lib/adapters/ โ”œโ”€โ”€ storage/ # Local, S3, SFTP, etc. โ”œโ”€โ”€ notification/ # Discord, Email, etc. โ””โ”€โ”€ oidc/ # SSO providers (Authentik, PocketID, Generic) + +src/lib/ssh/ # Shared SSH infrastructure +โ”œโ”€โ”€ ssh-client.ts # Generic SSH2 client +โ”œโ”€โ”€ utils.ts # Shell escaping, binary checks, arg builders +โ””โ”€โ”€ index.ts # Re-exports ``` ## Adapter Types diff --git a/wiki/developer-guide/index.md b/wiki/developer-guide/index.md index 4a152d0..85f2489 100644 --- a/wiki/developer-guide/index.md +++ b/wiki/developer-guide/index.md @@ -90,6 +90,7 @@ pnpm test:ui - **Server Actions delegate to Services** - No business logic in actions - **Adapters are pluggable** - Follow interface contracts - **Streaming architecture** - Efficient memory usage +- **SSH exec over tunneling** - Database tools run remotely via SSH, output streams back - **Permission checks everywhere** - RBAC enforcement ## Contributing diff --git a/wiki/index.md b/wiki/index.md index 8d98c3a..7fd1802 100644 --- a/wiki/index.md +++ b/wiki/index.md @@ -110,15 +110,15 @@ Then open [https://localhost:3000](https://localhost:3000) and create your first :::tabs == ๐Ÿ—„๏ธ Databases -| Database | Versions | -| :--- | :--- | -| **PostgreSQL** | 12, 13, 14, 15, 16, 17, 18 | -| **MySQL** | 5.7, 8.x, 9.x | -| **MariaDB** | 10.x, 11.x | -| **MongoDB** | 4.x, 5.x, 6.x, 7.x, 8.x | -| **Redis** | 6.x, 7.x, 8.x | -| **SQLite** | 3.x (Local & 
SSH) | -| **Microsoft SQL Server** | 2017, 2019, 2022, Azure SQL Edge | +| Database | Versions | Modes | +| :--- | :--- | :--- | +| **PostgreSQL** | 12, 13, 14, 15, 16, 17, 18 | Direct, SSH | +| **MySQL** | 5.7, 8.x, 9.x | Direct, SSH | +| **MariaDB** | 10.x, 11.x | Direct, SSH | +| **MongoDB** | 4.x, 5.x, 6.x, 7.x, 8.x | Direct, SSH | +| **Redis** | 6.x, 7.x, 8.x | Direct, SSH | +| **SQLite** | 3.x | Local, SSH | +| **Microsoft SQL Server** | 2017, 2019, 2022, Azure SQL Edge | Direct (+ SSH file transfer) | == โ˜๏ธ Storage diff --git a/wiki/user-guide/sources/index.md b/wiki/user-guide/sources/index.md index fc79f84..1467d9a 100644 --- a/wiki/user-guide/sources/index.md +++ b/wiki/user-guide/sources/index.md @@ -18,10 +18,54 @@ DBackup supports a wide variety of database engines. 1. Navigate to **Sources** โ†’ **Add Source** 2. Select the database type -3. Fill in connection details (host, port, credentials) -4. Click **Test Connection** to verify -5. Click **Fetch Databases** to list available databases -6. Select which databases to backup โ†’ **Save** +3. Choose **Connection Mode**: Direct or SSH (see below) +4. Fill in connection details (host, port, credentials) +5. Click **Test Connection** to verify +6. Click **Fetch Databases** to list available databases +7. Select which databases to backup โ†’ **Save** + +## Connection Modes + +DBackup supports two connection modes for most database types: + +| Mode | Description | Use Case | +| :--- | :--- | :--- | +| **Direct** | DBackup connects directly to the database via TCP | Database is on the same network / Docker network or connected via VPN (recommended) | +| **SSH** | DBackup connects via SSH and runs database tools on the remote host | Database is on a remote server, not directly reachable, or no local CLI tools installed | + +### SSH Mode + +In SSH mode, DBackup connects to the remote server via SSH and executes database CLI tools (e.g., `mysqldump`, `pg_dump`) **directly on that server**. 
The backup output is streamed back to DBackup over the SSH connection. This is **not** an SSH tunnel - the database tools run remotely. + +**Supported adapters:** MySQL, MariaDB, PostgreSQL, MongoDB, Redis, SQLite + +::: warning Required: Database CLI Tools on Remote Host +When using SSH mode, the required database client tools **must be installed on the remote SSH server**. DBackup does not transfer or install any tools - it only executes them. See the individual adapter pages for the specific tools required. +::: + +### SSH Configuration Fields + +| Field | Description | Default | Required | +| :--- | :--- | :--- | :--- | +| **SSH Host** | SSH server hostname or IP | - | โœ… | +| **SSH Port** | SSH server port | `22` | โŒ | +| **SSH Username** | SSH login username | - | โœ… | +| **SSH Auth Type** | Authentication method: Password, Private Key, or Agent | `Password` | โœ… | +| **SSH Password** | SSH password (for password auth) | - | โŒ | +| **SSH Private Key** | PEM-formatted private key (for key auth) | - | โŒ | +| **SSH Passphrase** | Passphrase for encrypted private key | - | โŒ | + +::: tip SSH Agent +To use SSH agent forwarding in Docker, mount the agent socket: +```yaml +services: + dbackup: + volumes: + - ${SSH_AUTH_SOCK}:/ssh-agent + environment: + - SSH_AUTH_SOCK=/ssh-agent +``` +::: ## Connection from Docker diff --git a/wiki/user-guide/sources/mongodb.md b/wiki/user-guide/sources/mongodb.md index 2becaf2..2115b3f 100644 --- a/wiki/user-guide/sources/mongodb.md +++ b/wiki/user-guide/sources/mongodb.md @@ -10,25 +10,99 @@ Configure MongoDB databases for backup. DBackup uses `mongodump` from MongoDB Database Tools. 
+## Connection Modes + +| Mode | Description | +| :--- | :--- | +| **Direct** | DBackup connects via TCP and runs `mongodump` locally | +| **SSH** | DBackup connects via SSH and runs `mongodump` on the remote host | + ## Configuration -### Basic Settings +| Field | Description | Default | Required | +| :--- | :--- | :--- | :--- | +| **Connection Mode** | Direct (TCP) or SSH | `Direct` | โœ… | +| **Connection URI** | Full MongoDB URI (overrides other settings) | โ€” | โŒ | +| **Host** | Database server hostname | `localhost` | โœ… | +| **Port** | MongoDB port | `27017` | โœ… | +| **User** | Database username | โ€” | โŒ | +| **Password** | Database password | โ€” | โŒ | +| **Auth Database** | Authentication database | `admin` | โŒ | +| **Database** | Database name(s) to backup | All databases | โŒ | +| **Additional Options** | Extra `mongodump` flags | โ€” | โŒ | -| Field | Description | Default | -| :--- | :--- | :--- | -| **Connection URI** | Full MongoDB URI (overrides other settings) | Optional | -| **Host** | Database server hostname | `localhost` | -| **Port** | MongoDB port | `27017` | -| **User** | Database username | Optional | -| **Password** | Database password | Optional | -| **Auth Database** | Authentication database | `admin` | -| **Database** | Database name(s) to backup | All databases | +### SSH Mode Fields -### Advanced Options +These fields appear when **Connection Mode** is set to **SSH**: -| Field | Description | -| :--- | :--- | -| **Additional Options** | Extra `mongodump` flags | +| Field | Description | Default | Required | +| :--- | :--- | :--- | :--- | +| **SSH Host** | SSH server hostname or IP | โ€” | โœ… | +| **SSH Port** | SSH server port | `22` | โŒ | +| **SSH Username** | SSH login username | โ€” | โœ… | +| **SSH Auth Type** | Password, Private Key, or Agent | `Password` | โœ… | +| **SSH Password** | SSH password | โ€” | โŒ | +| **SSH Private Key** | PEM-formatted private key | โ€” | โŒ | +| **SSH Passphrase** | Passphrase 
for encrypted key | โ€” | โŒ | + +## Prerequisites + +### Direct Mode + +The DBackup server needs `mongodump`, `mongorestore`, and `mongosh` CLI tools installed. + +**Docker**: Already included in the DBackup image. + +### SSH Mode + +The **remote SSH server** must have the following tools installed: + +```bash +# Required for backup +mongodump + +# Required for restore +mongorestore + +# Required for connection testing and database listing +mongosh +``` + +**Install on the remote host:** + +
+Debian/Ubuntu โ€” MongoDB Database Tools + mongosh + +Add the official MongoDB repository first: +```bash +# Import MongoDB GPG key +curl -fsSL https://www.mongodb.org/static/pgp/server-8.0.asc | \ + gpg --dearmor -o /usr/share/keyrings/mongodb-server-8.0.gpg + +# Add repository (Debian 12 / Ubuntu 24.04 example) +echo "deb [signed-by=/usr/share/keyrings/mongodb-server-8.0.gpg] https://repo.mongodb.org/apt/debian bookworm/mongodb-org/8.0 main" | \ + tee /etc/apt/sources.list.d/mongodb-org-8.0.list + +# Install tools +apt-get update +apt-get install mongodb-database-tools mongodb-mongosh +``` + +See the official docs for other distro versions: +- [MongoDB Database Tools](https://www.mongodb.com/docs/database-tools/installation/installation-linux/) +- [mongosh](https://www.mongodb.com/docs/mongodb-shell/install/) + +
+ +```bash +# macOS +brew install mongodb-database-tools +brew install mongosh +``` + +::: danger Important +In SSH mode, the MongoDB tools must be installed on the remote server. DBackup executes them remotely via SSH and streams the output back. +::: ## Connection Methods @@ -78,12 +152,29 @@ For Atlas clusters, create a user with "Backup Admin" role in the Atlas UI. ## Backup Process +### Direct Mode + DBackup uses `mongodump` which creates a binary BSON dump: - Consistent point-in-time backup - Includes indexes and collection options - Supports oplog for replica set backups +### SSH Mode + +In SSH mode, DBackup: + +1. Connects to the remote server via SSH +2. Checks that `mongodump` is available on the remote host +3. Executes `mongodump --archive --gzip` remotely +4. Streams the archive output back over the SSH connection +5. Applies additional encryption locally +6. Uploads to the configured storage destination + +::: tip Host in SSH Mode +The **Host** field refers to the MongoDB hostname **as seen from the SSH server**. If MongoDB runs on the same machine as the SSH server, use `127.0.0.1` or `localhost`. Connection URIs also work in SSH mode. +::: + ### Output Format The backup creates a directory structure: @@ -224,6 +315,21 @@ not authorized on admin to execute command db.grantRolesToUser("dbackup", [{ role: "backup", db: "admin" }]) ``` +### SSH: Binary Not Found + +``` +Required binary not found on remote server. Tried: mongodump +``` + +**Solution:** Install MongoDB Database Tools on the remote server. See [MongoDB Database Tools Installation](https://www.mongodb.com/docs/database-tools/installation/). + +### SSH: Connection Refused + +**Solution:** +1. Verify SSH is running: `systemctl status sshd` +2. Check SSH port and firewall rules +3. 
Test manually: `ssh user@host` + ## Restore To restore a MongoDB backup: @@ -241,12 +347,8 @@ To restore a MongoDB backup: - **Preserve existing data**: Merge/upsert mode - **Specific collections**: Restore selected collections only -## Best Practices +## Next Steps -1. **Use `backup` role** instead of `root` for backup user -2. **Enable oplog** for point-in-time recovery with replica sets -3. **Schedule during low-traffic periods** -4. **Use secondary read preference** for replica sets -5. **Test restores regularly** to verify backup integrity -6. **Monitor backup duration** for performance tuning -7. **Consider compression** (enabled by default in mongodump 100.x) +- [Create a Backup Job](/user-guide/jobs/) +- [Enable Encryption](/user-guide/security/encryption) +- [Configure Retention](/user-guide/jobs/retention) diff --git a/wiki/user-guide/sources/mysql.md b/wiki/user-guide/sources/mysql.md index 4a64f49..146c20f 100644 --- a/wiki/user-guide/sources/mysql.md +++ b/wiki/user-guide/sources/mysql.md @@ -9,10 +9,18 @@ Configure MySQL or MariaDB databases for backup using `mysqldump` / `mariadb-dum | **MySQL** | 5.7, 8.0, 8.4, 9.0 | | **MariaDB** | 10.x, 11.x | +## Connection Modes + +| Mode | Description | +| :--- | :--- | +| **Direct** | DBackup connects via TCP and runs `mysqldump` locally | +| **SSH** | DBackup connects via SSH and runs `mysqldump` on the remote host | + ## Configuration | Field | Description | Default | Required | | :--- | :--- | :--- | :--- | +| **Connection Mode** | Direct (TCP) or SSH | `Direct` | โœ… | | **Host** | Database server hostname | `localhost` | โœ… | | **Port** | MySQL port | `3306` | โœ… | | **User** | Database username | โ€” | โœ… | @@ -21,6 +29,68 @@ Configure MySQL or MariaDB databases for backup using `mysqldump` / `mariadb-dum | **Additional Options** | Extra `mysqldump` flags | โ€” | โŒ | | **Disable SSL** | Disable SSL for self-signed certificates | `false` | โŒ | +### SSH Mode Fields + +These fields appear when 
**Connection Mode** is set to **SSH**: + +| Field | Description | Default | Required | +| :--- | :--- | :--- | :--- | +| **SSH Host** | SSH server hostname or IP | โ€” | โœ… | +| **SSH Port** | SSH server port | `22` | โŒ | +| **SSH Username** | SSH login username | โ€” | โœ… | +| **SSH Auth Type** | Password, Private Key, or Agent | `Password` | โœ… | +| **SSH Password** | SSH password | โ€” | โŒ | +| **SSH Private Key** | PEM-formatted private key | โ€” | โŒ | +| **SSH Passphrase** | Passphrase for encrypted key | โ€” | โŒ | + +## Prerequisites + +### Direct Mode + +The DBackup server (or Docker container) needs `mysql` and `mysqldump` CLI tools installed. + +**Docker**: Already included in the DBackup image. + +### SSH Mode + +The **remote SSH server** must have the following tools installed: + +```bash +# Required for backup +mysqldump # or mariadb-dump (MariaDB) + +# Required for restore +mysql # or mariadb (MariaDB) +``` + +DBackup auto-detects which binary is available (`mysqldump` vs `mariadb-dump`, `mysql` vs `mariadb`). + +**Install on the remote host:** +```bash +# Debian/Ubuntu (MySQL client) +apt-get install default-mysql-client + +# Debian/Ubuntu (MariaDB client โ€” also provides mysqldump) +apt-get install mariadb-client + +# RHEL/CentOS/Fedora +dnf install mysql + +# Alpine +apk add mysql-client + +# macOS +brew install mysql-client +``` + +::: tip Debian ships MariaDB by default +On Debian, the `mysql-client` package no longer exists. Use `default-mysql-client` (which installs `mariadb-client-compat`) or install `mariadb-client` directly. Both provide `mysqldump` and `mysql` commands that work with MySQL and MariaDB servers. +::: + +::: danger Important +In SSH mode, DBackup does **not** use local CLI tools. The database tools must be installed on the remote server where SSH connects to. DBackup executes them remotely and streams the output back. +::: + ## Setup Guide ### 1. 
Create a Backup User @@ -40,12 +110,31 @@ For backup-only operations, `SELECT`, `SHOW VIEW`, `TRIGGER`, and `LOCK TABLES` ### 2. Configure in DBackup +#### Direct Mode + +1. Go to **Sources** โ†’ **Add Source** +2. Select **MySQL** or **MariaDB** +3. Keep Connection Mode as **Direct** +4. Enter connection details +5. Click **Test Connection** +6. Click **Fetch Databases** and select databases +7. Save + +#### SSH Mode + 1. Go to **Sources** โ†’ **Add Source** 2. Select **MySQL** or **MariaDB** -3. Enter connection details -4. Click **Test Connection** -5. Click **Fetch Databases** and select databases -6. Save +3. Set Connection Mode to **SSH** +4. In the **SSH Connection** tab: enter SSH host, username, and authentication details +5. Click **Test SSH** to verify SSH connectivity +6. In the **Database** tab: enter MySQL host (usually `127.0.0.1` or `localhost` โ€” relative to the SSH server), port, user, and password +7. Click **Test Connection** to verify database connectivity via SSH +8. Click **Fetch Databases** and select databases +9. Save + +::: tip Host in SSH Mode +The **Host** field in SSH mode refers to the database hostname **as seen from the SSH server**, not from DBackup. If MySQL runs on the same machine as the SSH server, use `127.0.0.1` or `localhost`. +::: ### 3. Docker Network @@ -79,6 +168,8 @@ Use `mysql` as the hostname in DBackup. ## How It Works +### Direct Mode + DBackup uses `mysqldump` (or `mariadb-dump` for MariaDB) with these default flags: - `--single-transaction` โ€” Consistent backup without locking (InnoDB) @@ -88,6 +179,19 @@ DBackup uses `mysqldump` (or `mariadb-dump` for MariaDB) with these default flag Output: `.sql` file with `CREATE` and `INSERT` statements. +### SSH Mode + +In SSH mode, DBackup: + +1. Connects to the remote server via SSH +2. Checks that `mysqldump` (or `mariadb-dump`) is available on the remote host +3. Executes the dump command remotely with the same flags as direct mode +4. 
Streams the SQL output back over the SSH connection +5. Applies compression and encryption locally on the DBackup server +6. Uploads the processed backup to the configured storage destination + +The database password is passed securely via the `MYSQL_PWD` environment variable in the remote session โ€” it does not appear in the process arguments or shell history. + ### Multi-Database Backups When backing up multiple databases, DBackup creates a **TAR archive**: @@ -154,6 +258,43 @@ bind-address = 0.0.0.0 --ssl-mode=REQUIRED --ssl-ca=/path/to/ca.pem ``` +### SSH: Binary Not Found + +``` +Required binary not found on remote server. Tried: mysqldump, mariadb-dump +``` + +**Solution:** Install the MySQL/MariaDB client package on the remote server: +```bash +# Ubuntu/Debian +apt-get install mysql-client +# or +apt-get install mariadb-client +``` + +### SSH: Connection Refused + +``` +SSH connection failed: connect ECONNREFUSED +``` + +**Solution:** +1. Verify SSH is running on the remote server: `systemctl status sshd` +2. Check the SSH port (default: 22) +3. Check firewall rules allow SSH from the DBackup server +4. Test manually: `ssh user@host` + +### SSH: Permission Denied + +``` +SSH connection failed: All configured authentication methods failed +``` + +**Solution:** +1. Verify SSH credentials (username, password, or key) +2. For private key auth, ensure the key is in PEM or OpenSSH format +3. Check the remote server allows the chosen auth method in `sshd_config` + ## Next Steps - [Create a Backup Job](/user-guide/jobs/) diff --git a/wiki/user-guide/sources/postgresql.md b/wiki/user-guide/sources/postgresql.md index 646048a..a274b66 100644 --- a/wiki/user-guide/sources/postgresql.md +++ b/wiki/user-guide/sources/postgresql.md @@ -10,23 +10,81 @@ Configure PostgreSQL databases for backup. DBackup uses `pg_dump` from PostgreSQL 18 client, which is backward compatible with older server versions. 
+## Connection Modes + +| Mode | Description | +| :--- | :--- | +| **Direct** | DBackup connects via TCP and runs `pg_dump` locally | +| **SSH** | DBackup connects via SSH and runs `pg_dump` on the remote host | + ## Configuration -### Basic Settings +| Field | Description | Default | Required | +| :--- | :--- | :--- | :--- | +| **Connection Mode** | Direct (TCP) or SSH | `Direct` | โœ… | +| **Host** | Database server hostname | `localhost` | โœ… | +| **Port** | PostgreSQL port | `5432` | โœ… | +| **User** | Database username | โ€” | โœ… | +| **Password** | Database password | โ€” | โŒ | +| **Database** | Database name(s) to backup | All databases | โŒ | +| **Additional Options** | Extra `pg_dump` flags | โ€” | โŒ | -| Field | Description | Default | -| :--- | :--- | :--- | -| **Host** | Database server hostname | `localhost` | -| **Port** | PostgreSQL port | `5432` | -| **User** | Database username | Required | -| **Password** | Database password | Optional | -| **Database** | Database name(s) to backup | All databases | +### SSH Mode Fields -### Advanced Options +These fields appear when **Connection Mode** is set to **SSH**: -| Field | Description | -| :--- | :--- | -| **Additional Options** | Extra `pg_dump` flags | +| Field | Description | Default | Required | +| :--- | :--- | :--- | :--- | +| **SSH Host** | SSH server hostname or IP | โ€” | โœ… | +| **SSH Port** | SSH server port | `22` | โŒ | +| **SSH Username** | SSH login username | โ€” | โœ… | +| **SSH Auth Type** | Password, Private Key, or Agent | `Password` | โœ… | +| **SSH Password** | SSH password | โ€” | โŒ | +| **SSH Private Key** | PEM-formatted private key | โ€” | โŒ | +| **SSH Passphrase** | Passphrase for encrypted key | โ€” | โŒ | + +## Prerequisites + +### Direct Mode + +The DBackup server needs `psql`, `pg_dump`, and `pg_restore` CLI tools installed. + +**Docker**: Already included in the DBackup image. 
+
+### SSH Mode
+
+The **remote SSH server** must have the following tools installed:
+
+```bash
+# Required for backup
+pg_dump
+
+# Required for restore
+pg_restore
+
+# Required for connection testing
+# and database listing
+psql
+```
+
+**Install on the remote host:**
+```bash
+# Ubuntu/Debian
+apt-get install postgresql-client
+
+# RHEL/CentOS/Fedora
+dnf install postgresql
+
+# Alpine
+apk add postgresql-client
+
+# macOS
+brew install libpq
+```
+
+::: danger Important
+In SSH mode, the database tools must be installed on the remote server. DBackup executes them remotely via SSH and streams the output back. The version on the remote server determines compatibility.
+:::
 
 ## Setting Up a Backup User
 
@@ -59,12 +117,31 @@ GRANT pg_read_all_data TO dbackup;
 
 ## Backup Process
 
+### Direct Mode
+
 DBackup uses `pg_dump` with these default options:
 
 - `--format=plain`: SQL text format
 - `--no-owner`: Don't output ownership commands
 - `--no-acl`: Don't output access privilege commands
 
+### SSH Mode
+
+In SSH mode, DBackup:
+
+1. Connects to the remote server via SSH
+2. Checks that `pg_dump` and `psql` are available on the remote host
+3. Executes `pg_dump` remotely (custom format with compression: `-F c -Z 6`)
+4. Streams the dump output back over the SSH connection
+5. Applies additional compression/encryption locally
+6. Uploads to the configured storage destination
+
+The password is passed securely via the `PGPASSWORD` environment variable in the remote session.
+
+::: tip Host in SSH Mode
+The **Host** field refers to the database hostname **as seen from the SSH server**. If PostgreSQL runs on the same machine as the SSH server, use `127.0.0.1` or `localhost`.
+:::
+
 ### Output Format
 
 The backup creates a `.sql` file containing:
 
@@ -218,6 +295,28 @@ GRANT SELECT ON LARGE OBJECTS TO dbackup;
 -- Or use superuser for backup
 ```
+
+### SSH: Binary Not Found
+
+```
+Required binary not found on remote server. 
Tried: pg_dump +``` + +**Solution:** Install the PostgreSQL client package on the remote server: +```bash +# Ubuntu/Debian +apt-get install postgresql-client + +# RHEL/CentOS +dnf install postgresql +``` + +### SSH: Connection Refused + +**Solution:** +1. Verify SSH is running: `systemctl status sshd` +2. Check SSH port and firewall rules +3. Test manually: `ssh user@host` + ## Restore To restore a PostgreSQL backup: @@ -236,11 +335,8 @@ The restore process can: - Restore to an existing database - Map database names (restore `prod` to `staging`) -## Best Practices +## Next Steps -1. **Use `pg_read_all_data` role** (PostgreSQL 14+) for backup user -2. **Test restores regularly** to verify backup integrity -3. **Enable compression** for large databases -4. **Schedule during maintenance windows** for minimal impact -5. **Consider custom format** (`--format=custom`) for selective restore -6. **Monitor pg_stat_activity** during backup for performance impact +- [Create a Backup Job](/user-guide/jobs/) +- [Enable Encryption](/user-guide/security/encryption) +- [Configure Retention](/user-guide/jobs/retention) diff --git a/wiki/user-guide/sources/redis.md b/wiki/user-guide/sources/redis.md index 16a5c99..337caa9 100644 --- a/wiki/user-guide/sources/redis.md +++ b/wiki/user-guide/sources/redis.md @@ -8,7 +8,14 @@ Redis is an in-memory data structure store used as a database, cache, message br | :--- | | 6.x, 7.x, 8.x | -## Architecure +## Connection Modes + +| Mode | Description | +| :--- | :--- | +| **Direct** | DBackup connects via TCP and runs `redis-cli` locally | +| **SSH** | DBackup connects via SSH and runs `redis-cli` on the remote host | + +## Architecture DBackup uses `redis-cli --rdb` to download RDB snapshots. @@ -19,25 +26,33 @@ DBackup uses `redis-cli --rdb` to download RDB snapshots. 
## Configuration -### Basic Settings - -| Field | Description | Default | -| :--- | :--- | :--- | -| **Host** | Redis server hostname or IP | `localhost` | -| **Port** | Redis server port | `6379` | -| **Password** | Optional authentication password | - | -| **Database** | Database index (0-15) for display purposes | `0` | - -### Advanced Settings - -| Field | Description | Default | -| :--- | :--- | :--- | -| **Username** | Redis 6+ ACL username | - | -| **TLS** | Enable TLS/SSL connection | `false` | -| **Mode** | Connection mode: `standalone` or `sentinel` | `standalone` | -| **Sentinel Master Name** | Master name for Sentinel mode | - | -| **Sentinel Nodes** | Comma-separated Sentinel node addresses | - | -| **Additional Options** | Extra `redis-cli` flags | - | +| Field | Description | Default | Required | +| :--- | :--- | :--- | :--- | +| **Connection Mode** | Direct (TCP) or SSH | `Direct` | โœ… | +| **Host** | Redis server hostname or IP | `localhost` | โœ… | +| **Port** | Redis server port | `6379` | โœ… | +| **Password** | Optional authentication password | โ€” | โŒ | +| **Database** | Database index (0-15) for display purposes | `0` | โŒ | +| **Username** | Redis 6+ ACL username | โ€” | โŒ | +| **TLS** | Enable TLS/SSL connection | `false` | โŒ | +| **Mode** | Connection mode: `standalone` or `sentinel` | `standalone` | โŒ | +| **Sentinel Master Name** | Master name for Sentinel mode | โ€” | โŒ | +| **Sentinel Nodes** | Comma-separated Sentinel node addresses | โ€” | โŒ | +| **Additional Options** | Extra `redis-cli` flags | โ€” | โŒ | + +### SSH Mode Fields + +These fields appear when **Connection Mode** is set to **SSH**: + +| Field | Description | Default | Required | +| :--- | :--- | :--- | :--- | +| **SSH Host** | SSH server hostname or IP | โ€” | โœ… | +| **SSH Port** | SSH server port | `22` | โŒ | +| **SSH Username** | SSH login username | โ€” | โœ… | +| **SSH Auth Type** | Password, Private Key, or Agent | `Password` | โœ… | +| **SSH 
Password** | SSH password | โ€” | โŒ | +| **SSH Private Key** | PEM-formatted private key | โ€” | โŒ | +| **SSH Passphrase** | Passphrase for encrypted key | โ€” | โŒ | ## Example Configuration @@ -121,7 +136,11 @@ Unlike relational databases, Redis uses numbered databases (0-15). When configur ## Required CLI Tools -The Redis adapter requires `redis-cli` to be installed on the DBackup server: +The Redis adapter requires `redis-cli` to be installed. + +### Direct Mode + +`redis-cli` must be available on the DBackup server. **Docker**: Already included in the DBackup image. @@ -137,6 +156,32 @@ brew install redis apk add redis ``` +### SSH Mode + +`redis-cli` must be installed on the **remote SSH server**: + +```bash +# Ubuntu/Debian +apt-get install redis-tools + +# RHEL/CentOS/Fedora +dnf install redis + +# Alpine +apk add redis + +# macOS +brew install redis +``` + +::: danger Important +In SSH mode, the `redis-cli` tool must be installed on the remote server. DBackup executes it remotely via SSH and streams the RDB output back. +::: + +::: tip Host in SSH Mode +The **Host** field refers to the Redis hostname **as seen from the SSH server**. If Redis runs on the same machine as the SSH server, use `127.0.0.1` or `localhost`. +::: + ## Troubleshooting ### Connection Refused @@ -159,6 +204,25 @@ For Redis 6+ with ACL: If using self-signed certificates, you may need to add `--insecure` to the Additional Options field. +### SSH: Binary Not Found + +``` +Required binary not found on remote server. Tried: redis-cli +``` + +**Solution:** Install Redis tools on the remote server: +```bash +# Ubuntu/Debian +apt-get install redis-tools +``` + +### SSH: Connection Refused + +**Solution:** +1. Verify SSH is running: `systemctl status sshd` +2. Check SSH port and firewall rules +3. 
Test manually: `ssh user@host` + ## See Also - [Storage Explorer](/user-guide/features/storage-explorer) - Browse and download backups From 04e1fe62e46d307b5f4642bc22bfdc80df790773 Mon Sep 17 00:00:00 2001 From: Manu Date: Sat, 28 Mar 2026 14:47:42 +0100 Subject: [PATCH 07/21] Postgres: auto-discover DBs when none selected When no databases are specified for a PostgreSQL backup, dump() now auto-discovers databases via getDatabases(config), logs the discovered list, and errors if none are found. Adds the necessary import and updates the changelog to document the new behavior. --- src/lib/adapters/database/postgres/dump.ts | 11 +++++++++++ wiki/changelog.md | 1 + 2 files changed, 12 insertions(+) diff --git a/src/lib/adapters/database/postgres/dump.ts b/src/lib/adapters/database/postgres/dump.ts index a4d5b7c..e0bc9b9 100644 --- a/src/lib/adapters/database/postgres/dump.ts +++ b/src/lib/adapters/database/postgres/dump.ts @@ -13,6 +13,7 @@ import { } from "../common/tar-utils"; import { TarFileEntry, TarManifest } from "../common/types"; import { PostgresConfig } from "@/lib/adapters/definitions"; +import { getDatabases } from "./connection"; import { SshClient, isSSHMode, @@ -200,6 +201,16 @@ export async function dump( if (db) dbs = [db]; } + // Auto-discover all databases if none specified + if (dbs.length === 0) { + log("No DB selected โ€” auto-discovering all databasesโ€ฆ", "info"); + dbs = await getDatabases(config); + log(`Discovered ${dbs.length} database(s): ${dbs.join(", ")}`, "info"); + if (dbs.length === 0) { + throw new Error("No databases found on the server"); + } + } + const dialect = getDialect('postgres', config.detectedVersion); const pgDumpBinary = await getPostgresBinary('pg_dump', config.detectedVersion); log(`Using ${pgDumpBinary} for PostgreSQL ${config.detectedVersion}`, 'info'); diff --git a/wiki/changelog.md b/wiki/changelog.md index c33b310..9a72753 100644 --- a/wiki/changelog.md +++ b/wiki/changelog.md @@ -15,6 +15,7 @@ All notable changes 
to DBackup are documented here. ### ๐Ÿ› Bug Fixes - **mysql**: Backup jobs with no database selected now auto-discover all databases instead of failing with "No database specified" +- **postgres**: Backup jobs with no database selected now auto-discover all databases instead of `pg_dump` defaulting to the username as database name ### ๐ŸŽจ Improvements From de46a493883a8c9ce64561e6cf430013b739c9b6 Mon Sep 17 00:00:00 2001 From: Manu Date: Sun, 29 Mar 2026 14:23:02 +0200 Subject: [PATCH 08/21] Improve SSH DB restores, reliability and UI Handle SSH edge cases and harden remote restores across adapters; add SFTP uploads, better exit/signal handling, log rate-limiting and secret redaction, plus related UI/metadata fixes. Key changes: - Treat null exit codes and include signal info in SSH exec errors for MongoDB, MySQL, Postgres and Redis. - Add SshClient.uploadFile and switch SSH restores (MySQL, Postgres) to upload-then-restore to avoid data loss when piping via exec streams. - Add MySQL-specific hardening: --net-buffer-length on dump, --max-allowed-packet on client, stderr handler that redacts secrets and rate-limits logs, post-failure diagnostics, and safer SSH restore flow. - Consume stdout for MongoDB/MySQL SSH restores to avoid backpressure hangs. - Change remoteEnv to use export statements to avoid leaking secrets in kill reports. - Restore UI: show server-adapter specific target DB name input when db names unknown and pass sourceType metadata for frontend; truncate long adapter names in selects. - Post-dump auto-discovery of DB names and minor runner metadata fixes. - Update changelog and developer guide to document these changes and security improvements. These changes address OOMs, partial uploads, signal handling, noisy logs, and UX gaps during restores. 
--- src/app/api/storage/[id]/analyze/route.ts | 12 +- .../storage/restore/restore-client.tsx | 31 ++- src/components/dashboard/jobs/job-form.tsx | 12 +- src/lib/adapters/database/mongodb/dump.ts | 4 +- src/lib/adapters/database/mongodb/restore.ts | 6 +- .../database/mysql/dialects/mysql-base.ts | 6 +- src/lib/adapters/database/mysql/dump.ts | 7 +- src/lib/adapters/database/mysql/restore.ts | 192 +++++++++++++++--- src/lib/adapters/database/postgres/dump.ts | 4 +- src/lib/adapters/database/postgres/restore.ts | 23 +-- src/lib/adapters/database/redis/dump.ts | 4 +- src/lib/runner/steps/02-dump.ts | 30 ++- src/lib/ssh/ssh-client.ts | 24 ++- src/lib/ssh/utils.ts | 11 +- wiki/changelog.md | 18 ++ wiki/developer-guide/adapters/database.md | 2 +- 16 files changed, 309 insertions(+), 77 deletions(-) diff --git a/src/app/api/storage/[id]/analyze/route.ts b/src/app/api/storage/[id]/analyze/route.ts index 0b53b8c..c8234dd 100644 --- a/src/app/api/storage/[id]/analyze/route.ts +++ b/src/app/api/storage/[id]/analyze/route.ts @@ -73,10 +73,20 @@ export async function POST(req: NextRequest, props: { params: Promise<{ id: stri if (Array.isArray(meta.databases.names) && meta.databases.names.length > 0) { return NextResponse.json({ databases: meta.databases.names }); } - if (Array.isArray(meta.databases)) { + if (Array.isArray(meta.databases) && meta.databases.length > 0) { return NextResponse.json({ databases: meta.databases }); } } + // For multi-DB TAR archives, return the embedded database list + if (meta.multiDb?.databases?.length > 0) { + return NextResponse.json({ databases: meta.multiDb.databases }); + } + // For server-based adapters (not sqlite) with empty names, + // use the source type to signal the frontend that this is a DB restore + const serverAdapters = ['mysql', 'mariadb', 'postgres', 'mongodb', 'mssql', 'redis']; + if (meta.sourceType && serverAdapters.includes(meta.sourceType.toLowerCase())) { + return NextResponse.json({ databases: [], sourceType: 
meta.sourceType }); + } } } catch (_e) { // Fallthrough diff --git a/src/app/dashboard/storage/restore/restore-client.tsx b/src/app/dashboard/storage/restore/restore-client.tsx index 6e38a3a..e3c222e 100644 --- a/src/app/dashboard/storage/restore/restore-client.tsx +++ b/src/app/dashboard/storage/restore/restore-client.tsx @@ -73,6 +73,7 @@ export function RestoreClient() { // Advanced Restore State const [analyzedDbs, setAnalyzedDbs] = useState([]); const [dbConfig, setDbConfig] = useState([]); + const [backupSourceType, setBackupSourceType] = useState(""); // Execution State const [restoring, setRestoring] = useState(false); @@ -97,6 +98,10 @@ export function RestoreClient() { const isSystemConfig = file?.sourceType === 'SYSTEM'; + const SERVER_ADAPTERS = ['mysql', 'mariadb', 'postgres', 'mongodb', 'mssql', 'redis']; + const resolvedSourceType = backupSourceType || file?.sourceType || ''; + const isServerAdapter = SERVER_ADAPTERS.includes(resolvedSourceType.toLowerCase()); + const [restoreOptions, setRestoreOptions] = useState({ settings: true, adapters: true, @@ -221,6 +226,9 @@ export function RestoreClient() { if (res.ok) { const data = await res.json(); + if (data.sourceType) { + setBackupSourceType(data.sourceType); + } if (data.databases && data.databases.length > 0) { setAnalyzedDbs(data.databases); setDbConfig(data.databases.map((db: string) => ({ @@ -569,7 +577,9 @@ export function RestoreClient() { {analyzedDbs.length > 0 ? 'Select which databases to restore and configure target names.' - : 'Choose how to restore this backup.'} + : isServerAdapter + ? 'Specify the target database name for the restore.' + : 'Choose how to restore this backup.'}
{analyzedDbs.length > 0 && ( @@ -654,6 +664,25 @@ export function RestoreClient() {
+ ) : isServerAdapter ? ( +
+

+ The database names in this backup could not be determined automatically. + Leave empty to restore into the original database, or specify a target name. +

+
+ + setTargetDbName(e.target.value)} + className="h-8" + /> +

+ If empty, the backup will be restored into its original database. Existing data will be overwritten. +

+
+
) : (
diff --git a/src/components/dashboard/jobs/job-form.tsx b/src/components/dashboard/jobs/job-form.tsx index b75e95e..c14f573 100644 --- a/src/components/dashboard/jobs/job-form.tsx +++ b/src/components/dashboard/jobs/job-form.tsx @@ -294,9 +294,9 @@ export function JobForm({ sources, destinations, notifications, encryptionProfil className={cn("w-full justify-between", !field.value && "text-muted-foreground")} > {field.value ? ( - - s.id === field.value)?.adapterId ?? ""} className="h-4 w-4" /> - {sources.find((s) => s.id === field.value)?.name} + + s.id === field.value)?.adapterId ?? ""} className="h-4 w-4 shrink-0" /> + {sources.find((s) => s.id === field.value)?.name} ) : "Select Source"} @@ -645,9 +645,9 @@ function DestinationRow({ index, form, destinations, usedDestIds, isExpanded, on className={cn("w-full justify-between h-9", !field.value && "text-muted-foreground")} > {currentDest ? ( - - - {currentDest.name} + + + {currentDest.name} ) : "Select Destination"} diff --git a/src/lib/adapters/database/mongodb/dump.ts b/src/lib/adapters/database/mongodb/dump.ts index f6de575..abf9421 100644 --- a/src/lib/adapters/database/mongodb/dump.ts +++ b/src/lib/adapters/database/mongodb/dump.ts @@ -136,9 +136,9 @@ async function dumpSingleDatabaseSSH( if (msg) log(msg, 'info'); }); - stream.on('exit', (code: number) => { + stream.on('exit', (code: number | null, signal?: string) => { if (code === 0) resolve(); - else reject(new Error(`Remote mongodump exited with code ${code}`)); + else reject(new Error(`Remote mongodump exited with code ${code ?? 'null'}${signal ? 
` (signal: ${signal})` : ''}`)); }); stream.on('error', (err: Error) => reject(err)); diff --git a/src/lib/adapters/database/mongodb/restore.ts b/src/lib/adapters/database/mongodb/restore.ts index 4366ac0..1ccfdc8 100644 --- a/src/lib/adapters/database/mongodb/restore.ts +++ b/src/lib/adapters/database/mongodb/restore.ts @@ -217,14 +217,16 @@ async function restoreSingleDatabaseSSH( ssh.execStream(cmd, (err, stream) => { if (err) return reject(err); + stream.on('data', () => {}); + stream.stderr.on('data', (data: any) => { const msg = data.toString().trim(); if (msg) log(`[mongorestore] ${msg}`, 'info'); }); - stream.on('exit', (code: number) => { + stream.on('exit', (code: number | null, signal?: string) => { if (code === 0) resolve(); - else reject(new Error(`Remote mongorestore exited with code ${code}`)); + else reject(new Error(`Remote mongorestore exited with code ${code ?? 'null'}${signal ? ` (signal: ${signal})` : ''}`)); }); stream.on('error', (err: Error) => reject(err)); diff --git a/src/lib/adapters/database/mysql/dialects/mysql-base.ts b/src/lib/adapters/database/mysql/dialects/mysql-base.ts index 85c2b4f..0619ec8 100644 --- a/src/lib/adapters/database/mysql/dialects/mysql-base.ts +++ b/src/lib/adapters/database/mysql/dialects/mysql-base.ts @@ -7,7 +7,8 @@ export class MySQLBaseDialect extends BaseDialect { '-h', config.host, '-P', String(config.port), '-u', config.user, - '--protocol=tcp' // Always use TCP to avoid socket issues in containers + '--protocol=tcp', // Always use TCP to avoid socket issues in containers + '--net-buffer-length=16384' // Limit INSERT size to ~16KB to prevent OOM during restore ]; this.appendAuthArgs(args, config); @@ -29,7 +30,8 @@ export class MySQLBaseDialect extends BaseDialect { '-h', config.host, '-P', String(config.port), '-u', config.user, - '--protocol=tcp' + '--protocol=tcp', + '--max-allowed-packet=64M', ]; this.appendAuthArgs(args, config); diff --git a/src/lib/adapters/database/mysql/dump.ts 
b/src/lib/adapters/database/mysql/dump.ts index f02b212..51ddd11 100644 --- a/src/lib/adapters/database/mysql/dump.ts +++ b/src/lib/adapters/database/mysql/dump.ts @@ -100,6 +100,9 @@ async function dumpSingleDatabaseSSH( const dumpBin = await remoteBinaryCheck(ssh, "mariadb-dump", "mysqldump"); const args = buildMysqlArgs(config); + // Limit INSERT size to ~16KB to prevent OOM during restore on low-memory servers + args.push("--net-buffer-length=16384"); + // Add dump-specific options if ((config as any).options) { args.push(...(config as any).options.split(' ').filter((s: string) => s.trim().length > 0)); @@ -127,9 +130,9 @@ async function dumpSingleDatabaseSSH( onLog(msg); }); - stream.on('exit', (code: number) => { + stream.on('exit', (code: number | null, signal?: string) => { if (code === 0) resolve(); - else reject(new Error(`Remote mysqldump exited with code ${code}`)); + else reject(new Error(`Remote mysqldump exited with code ${code ?? 'null'}${signal ? ` (signal: ${signal})` : ''}`)); }); stream.on('error', (err: Error) => reject(err)); diff --git a/src/lib/adapters/database/mysql/restore.ts b/src/lib/adapters/database/mysql/restore.ts index e5d1b5b..a532248 100644 --- a/src/lib/adapters/database/mysql/restore.ts +++ b/src/lib/adapters/database/mysql/restore.ts @@ -7,6 +7,7 @@ import { getMysqlCommand } from "./tools"; import { spawn } from "child_process"; import { createReadStream } from "fs"; import fs from "fs/promises"; +import { randomUUID } from "crypto"; import path from "path"; import { waitForProcess } from "@/lib/adapters/process"; import { @@ -36,6 +37,85 @@ type MySQLRestoreConfig = (MySQLConfig | MariaDBConfig) & { selectedDatabases?: string[]; }; +const MAX_STDERR_LOG_LINES = 50; +const MAX_STDERR_LINE_LENGTH = 500; + +function createStderrHandler( + onLog: (msg: string, level?: LogLevel, type?: LogType, details?: string) => void, + secrets?: string[] +) { + let stderrCount = 0; + let suppressed = 0; + let buffer = ''; + + // Build 
redaction list from provided secrets (filter empty/undefined) + const redactList = (secrets || []).filter(s => s && s.length > 0); + + function redact(text: string): string { + let result = text; + for (const secret of redactList) { + // Replace all occurrences of the secret with ****** + while (result.includes(secret)) { + result = result.replace(secret, '******'); + } + } + return result; + } + + return { + handle(data: string) { + // Buffer incoming chunks and split by newlines to get complete lines + buffer += data; + const lines = buffer.split('\n'); + // Keep last incomplete line in buffer + buffer = lines.pop() || ''; + + for (const raw of lines) { + const msg = redact(raw.trim()); + if (!msg || msg.includes("Using a password") || msg.includes("Deprecated program name")) continue; + + // Always log actual MySQL error lines (ERROR xxxx) and separator lines + const isError = /^ERROR\s+\d+/.test(msg); + + if (isError) { + onLog(`MySQL: ${msg}`, 'error'); + continue; + } + + stderrCount++; + if (stderrCount <= MAX_STDERR_LOG_LINES) { + const truncated = msg.length > MAX_STDERR_LINE_LENGTH + ? msg.slice(0, MAX_STDERR_LINE_LENGTH) + '... (truncated)' + : msg; + onLog(`MySQL: ${truncated}`); + } else { + suppressed++; + } + } + }, + flush() { + // Flush remaining buffer + if (buffer.trim()) { + const msg = redact(buffer.trim()); + const isError = /^ERROR\s+\d+/.test(msg); + if (isError) { + onLog(`MySQL: ${msg}`, 'error'); + } else if (stderrCount <= MAX_STDERR_LOG_LINES) { + const truncated = msg.length > MAX_STDERR_LINE_LENGTH + ? msg.slice(0, MAX_STDERR_LINE_LENGTH) + '... (truncated)' + : msg; + onLog(`MySQL: ${truncated}`); + } else { + suppressed++; + } + } + if (suppressed > 0) { + onLog(`MySQL: ... ${suppressed} additional stderr line(s) suppressed`, 'warning'); + } + } + }; +} + export async function prepareRestore(config: MySQLRestoreConfig, databases: string[]): Promise { const usePrivileged = !!config.privilegedAuth; const user = usePrivileged ? 
config.privilegedAuth!.user : config.user; @@ -92,15 +172,16 @@ async function restoreSingleFile( fileStream.pipe(mysqlProc.stdin); + const stderr = createStderrHandler(onLog); await waitForProcess(mysqlProc, 'mysql', (d) => { - const msg = d.toString().trim(); - if (msg.includes("Using a password") || msg.includes("Deprecated program name")) return; - onLog(`MySQL: ${msg}`); + stderr.handle(d.toString()); }); + stderr.flush(); } /** - * SSH variant: pipe local SQL file to remote mysql client via SSH. + * SSH variant: upload SQL file to remote temp location, then run mysql restore locally. + * Uses upload-then-restore pattern (like PostgreSQL) to avoid SSH channel streaming issues. */ async function restoreSingleFileSSH( config: MySQLRestoreConfig, @@ -118,52 +199,111 @@ async function restoreSingleFileSSH( const ssh = new SshClient(); await ssh.connect(sshConfig); + const remoteTempFile = `/tmp/dbackup_restore_${randomUUID()}.sql`; + try { const mysqlBin = await remoteBinaryCheck(ssh, "mariadb", "mysql"); const args = buildMysqlArgs(config); + args.push("--max-allowed-packet=64M"); args.push(shellEscape(targetDb)); const env: Record = {}; if (config.password) env.MYSQL_PWD = config.password; - const cmd = remoteEnv(env, `${mysqlBin} ${args.join(" ")}`); - onLog(`Restoring to database (SSH): ${targetDb}`, 'info', 'command', `${mysqlBin} ${args.join(" ")}`); + // Pre-restore diagnostics: query server settings + try { + const diagCmd = remoteEnv(env, `${mysqlBin} ${buildMysqlArgs(config).join(" ")} -N -e "SELECT CONCAT('max_allowed_packet=', @@global.max_allowed_packet, ' innodb_buffer_pool_size=', @@global.innodb_buffer_pool_size, ' log_bin=', @@global.log_bin, ' innodb_flush_log_at_trx_commit=', @@global.innodb_flush_log_at_trx_commit)"`); + const diagResult = await ssh.exec(diagCmd); + if (diagResult.code === 0 && diagResult.stdout.trim()) { + onLog(`Server settings: ${diagResult.stdout.trim()}`); + } + } catch { + // Diagnostics are non-critical + } - const 
fileStream = createReadStream(sourcePath, { highWaterMark: 64 * 1024 }); + // 1. Upload SQL file to remote temp location via SFTP (guarantees data integrity) + onLog(`Uploading dump to remote server via SFTP (${(totalSize / 1024 / 1024).toFixed(1)} MB)...`, 'info'); + await ssh.uploadFile(sourcePath, remoteTempFile); - fileStream.on('data', (chunk) => { - if (onProgress && totalSize > 0) { - processedSize += chunk.length; - const p = Math.round((processedSize / totalSize) * 100); - if (p > lastProgress) { - lastProgress = p; - onProgress(p); - } + // Verify upload integrity + try { + const sizeCheck = await ssh.exec(`stat -c '%s' ${shellEscape(remoteTempFile)} 2>/dev/null || stat -f '%z' ${shellEscape(remoteTempFile)}`); + const remoteSize = parseInt(sizeCheck.stdout.trim(), 10); + if (remoteSize !== totalSize) { + throw new Error(`Upload size mismatch! Local: ${totalSize}, Remote: ${remoteSize}`); } - }); + onLog(`Upload verified: ${(remoteSize / 1024 / 1024).toFixed(1)} MB`); + } catch (e) { + if (e instanceof Error && e.message.includes('mismatch')) throw e; + // stat command failed โ€” non-critical + } + + if (onProgress) onProgress(90); + + // 2. 
Run mysql restore on the remote server from the uploaded file + const restoreCmd = remoteEnv(env, + `cat ${shellEscape(remoteTempFile)} | ${mysqlBin} ${args.join(" ")}` + ); + onLog(`Restoring to database (SSH): ${targetDb}`, 'info', 'command', `${mysqlBin} ${args.join(" ")}`); await new Promise((resolve, reject) => { - ssh.execStream(cmd, (err, stream) => { + const secrets = [config.password, config.privilegedAuth?.password].filter(Boolean) as string[]; + const stderr = createStderrHandler(onLog, secrets); + + ssh.execStream(restoreCmd, (err, stream) => { if (err) return reject(err); + stream.on('data', () => {}); + stream.stderr.on('data', (data: any) => { - const msg = data.toString().trim(); - if (msg.includes("Using a password") || msg.includes("Deprecated program name")) return; - onLog(`MySQL: ${msg}`); + stderr.handle(data.toString()); }); - stream.on('exit', (code: number) => { - if (code === 0) resolve(); - else reject(new Error(`Remote mysql exited with code ${code}`)); + stream.on('exit', (code: number | null, signal?: string) => { + stderr.flush(); + if (code === 0) { + onProgress?.(100); + resolve(); + } else { + reject(new Error(`Remote mysql exited with code ${code ?? 'null'}${signal ? 
` (signal: ${signal})` : ''}`)); + } }); stream.on('error', (err: Error) => reject(err)); - fileStream.on('error', (err: Error) => reject(err)); - - fileStream.pipe(stream); }); }); + } catch (error) { + // Post-failure diagnostics: check if MySQL server is still alive + try { + const aliveCheck = await ssh.exec( + remoteEnv( + { MYSQL_PWD: config.password }, + `${(await remoteBinaryCheck(ssh, "mariadb", "mysql").catch(() => "mysql"))} ${buildMysqlArgs(config).join(" ")} -N -e "SELECT 'alive'" 2>&1` + ) + ); + if (aliveCheck.stdout.includes('alive')) { + onLog(`Post-failure check: MySQL server is still running`, 'warning'); + } else { + onLog(`Post-failure check: MySQL server NOT responding โ€” ${aliveCheck.stderr.trim() || aliveCheck.stdout.trim()}`, 'error'); + } + } catch { + onLog(`Post-failure check: Could not reach MySQL server (likely crashed/OOM-killed)`, 'error'); + } + + // Check for OOM kills on the host + try { + const oomCheck = await ssh.exec(`dmesg 2>/dev/null | grep -i 'oom\\|killed process' | tail -3`); + if (oomCheck.stdout.trim()) { + onLog(`OOM killer detected: ${oomCheck.stdout.trim()}`, 'error'); + } + } catch { + // dmesg might require root + } + + throw error; } finally { + // Cleanup remote temp file + await ssh.exec(`rm -f ${shellEscape(remoteTempFile)}`).catch(() => {}); ssh.end(); } } diff --git a/src/lib/adapters/database/postgres/dump.ts b/src/lib/adapters/database/postgres/dump.ts index e0bc9b9..b75d586 100644 --- a/src/lib/adapters/database/postgres/dump.ts +++ b/src/lib/adapters/database/postgres/dump.ts @@ -152,9 +152,9 @@ async function dumpSingleDatabaseSSH( } }); - stream.on('exit', (code: number) => { + stream.on('exit', (code: number | null, signal?: string) => { if (code === 0) resolve(); - else reject(new Error(`Remote pg_dump for ${dbName} exited with code ${code}`)); + else reject(new Error(`Remote pg_dump for ${dbName} exited with code ${code ?? 'null'}${signal ? 
` (signal: ${signal})` : ''}`)); }); stream.on('error', (err: Error) => reject(err)); diff --git a/src/lib/adapters/database/postgres/restore.ts b/src/lib/adapters/database/postgres/restore.ts index 8f7ed19..ecd89af 100644 --- a/src/lib/adapters/database/postgres/restore.ts +++ b/src/lib/adapters/database/postgres/restore.ts @@ -25,7 +25,6 @@ import { shellEscape, } from "@/lib/ssh"; import { randomUUID } from "crypto"; -import { createReadStream } from "fs"; /** * Extended PostgreSQL config for restore operations with runtime fields @@ -250,25 +249,9 @@ async function restoreSingleDatabaseSSH( const pass = (priv && priv.password) ? priv.password : config.password; if (pass) env.PGPASSWORD = pass; - // 1. Upload dump file to remote temp location - log(`Uploading dump to remote: ${remoteTempFile}`, 'info'); - const fileStream = createReadStream(sourcePath); - - await new Promise((resolve, reject) => { - ssh.execStream(`cat > ${shellEscape(remoteTempFile)}`, (err, stream) => { - if (err) return reject(err); - - stream.on('exit', (code: number) => { - if (code === 0) resolve(); - else reject(new Error(`Failed to upload dump file (code ${code})`)); - }); - - stream.on('error', (err: Error) => reject(err)); - fileStream.on('error', (err: Error) => reject(err)); - - fileStream.pipe(stream); - }); - }); + // 1. Upload dump file to remote temp location via SFTP (guarantees data integrity) + log(`Uploading dump to remote via SFTP: ${remoteTempFile}`, 'info'); + await ssh.uploadFile(sourcePath, remoteTempFile); // 2. 
Run pg_restore on the remote const restoreArgs = [ diff --git a/src/lib/adapters/database/redis/dump.ts b/src/lib/adapters/database/redis/dump.ts index 3c9bc6c..24ca6d6 100644 --- a/src/lib/adapters/database/redis/dump.ts +++ b/src/lib/adapters/database/redis/dump.ts @@ -166,9 +166,9 @@ async function dumpSSH( stream.pipe(writeStream); - stream.on('exit', (code: number) => { + stream.on('exit', (code: number | null, signal?: string) => { if (code === 0) resolve(); - else reject(new Error(`Failed to stream RDB from remote (code ${code})`)); + else reject(new Error(`Failed to stream RDB from remote (code ${code ?? 'null'}${signal ? `, signal: ${signal}` : ''})`)); }); stream.on('error', (err: Error) => reject(err)); diff --git a/src/lib/runner/steps/02-dump.ts b/src/lib/runner/steps/02-dump.ts index aa5aec2..f37b83a 100644 --- a/src/lib/runner/steps/02-dump.ts +++ b/src/lib/runner/steps/02-dump.ts @@ -51,12 +51,11 @@ export async function stepExecuteDump(ctx: RunnerContext) { const isAll = options.includes("--all-databases"); let label = 'Unknown'; - let count: number | string = 'Unknown'; + let count: number = 0; let names: string[] = []; if (isAll) { label = 'All DBs'; - count = 'All'; // Try to fetch DB names for accurate metadata if (sourceAdapter.getDatabases) { try { @@ -88,6 +87,10 @@ export async function stepExecuteDump(ctx: RunnerContext) { label = 'No DB selected'; count = 0; } + } else { + // dbVal is undefined/null (e.g. MongoDB with no specific DB selected) + label = 'No DB selected'; + count = 0; } // Fetch engine version and edition @@ -165,6 +168,29 @@ export async function stepExecuteDump(ctx: RunnerContext) { ctx.dumpSize = dumpResult.size || 0; ctx.log(`Dump successful. 
Size: ${dumpResult.size} bytes`); + // If metadata has no DB names yet (auto-discovered during dump), fetch them now + if (ctx.metadata && (!ctx.metadata.names || ctx.metadata.names.length === 0)) { + try { + if (sourceAdapter.getDatabases) { + ctx.log(`Attempting post-dump DB discovery...`); + const discovered = await sourceAdapter.getDatabases(sourceConfig); + if (discovered && discovered.length > 0) { + ctx.metadata.names = discovered; + ctx.metadata.count = discovered.length; + ctx.metadata.label = `${discovered.length} DBs (auto-discovered)`; + ctx.log(`Updated metadata with ${discovered.length} auto-discovered database(s): ${discovered.join(', ')}`); + } else { + ctx.log(`Post-dump DB discovery returned no databases`); + } + } else { + ctx.log(`Adapter does not support getDatabases`); + } + } catch (e: unknown) { + const errMsg = e instanceof Error ? e.message : String(e); + ctx.log(`Post-dump DB discovery failed: ${errMsg}`, 'warning'); + } + } + // Check if the dump is a Multi-DB TAR archive and update metadata try { const dumpPath = ctx.tempFile; diff --git a/src/lib/ssh/ssh-client.ts b/src/lib/ssh/ssh-client.ts index e6cdedd..9dc809c 100644 --- a/src/lib/ssh/ssh-client.ts +++ b/src/lib/ssh/ssh-client.ts @@ -1,4 +1,4 @@ -import { Client, ConnectConfig } from "ssh2"; +import { Client, ConnectConfig, SFTPWrapper } from "ssh2"; /** * Generic SSH connection configuration used across all adapters. @@ -67,8 +67,8 @@ export class SshClient { let stderr = ""; stream - .on("close", (code: number, _signal: any) => { - resolve({ stdout, stderr, code }); + .on("close", (code: number | null, signal?: string) => { + resolve({ stdout, stderr, code: code ?? (signal ? 128 : 1) }); }) .on("data", (data: any) => { stdout += data.toString(); @@ -88,6 +88,24 @@ export class SshClient { this.client.exec(command, callback); } + /** + * Upload a local file to the remote server via SFTP. + * Uses SFTP protocol which guarantees data integrity (unlike piping through exec). 
+ */ + public uploadFile(localPath: string, remotePath: string): Promise { + return new Promise((resolve, reject) => { + this.client.sftp((err, sftp) => { + if (err) return reject(new Error(`SFTP session failed: ${err.message}`)); + + sftp.fastPut(localPath, remotePath, (err) => { + sftp.end(); + if (err) return reject(new Error(`SFTP upload failed: ${err.message}`)); + resolve(); + }); + }); + }); + } + public end(): void { this.client.end(); } diff --git a/src/lib/ssh/utils.ts b/src/lib/ssh/utils.ts index 3b43a59..3f0766b 100644 --- a/src/lib/ssh/utils.ts +++ b/src/lib/ssh/utils.ts @@ -9,21 +9,22 @@ export function shellEscape(value: string): string { } /** - * Build a remote command string with environment variables prepended. - * Variables are set inline so they don't leak into the shell history. + * Build a remote command string with environment variables exported before execution. + * Uses `export` statements separated by `;` so that if the main process is killed, + * bash's kill report only shows the command โ€” not the secrets. * * Example: remoteEnv({ MYSQL_PWD: "secret" }, "mysqldump -h 127.0.0.1 mydb") - * โ†’ "MYSQL_PWD='secret' mysqldump -h 127.0.0.1 mydb" + * โ†’ "export MYSQL_PWD='secret'; mysqldump -h 127.0.0.1 mydb" */ export function remoteEnv(vars: Record, command: string): string { const parts: string[] = []; for (const [key, value] of Object.entries(vars)) { if (value !== undefined && value !== "") { - parts.push(`${key}=${shellEscape(value)}`); + parts.push(`export ${key}=${shellEscape(value)}`); } } if (parts.length === 0) return command; - return `${parts.join(" ")} ${command}`; + return `${parts.join("; ")}; ${command}`; } /** diff --git a/wiki/changelog.md b/wiki/changelog.md index 9a72753..3122cd5 100644 --- a/wiki/changelog.md +++ b/wiki/changelog.md @@ -16,6 +16,24 @@ All notable changes to DBackup are documented here. 
- **mysql**: Backup jobs with no database selected now auto-discover all databases instead of failing with "No database specified" - **postgres**: Backup jobs with no database selected now auto-discover all databases instead of `pg_dump` defaulting to the username as database name +- **restore**: Restore page no longer shows SQLite-style "Overwrite / Restore as New" UI for server-based adapters โ€” now shows a target database name input when database names are unknown, and auto-discovers database names in backup metadata for future backups +- **ui**: Fixed adapter icon being pushed out of select buttons when source or destination name is too long โ€” names now truncate properly +- **ssh**: Fixed SSH exit code handling across all adapters โ€” `code` can be `null` when process is killed by signal (e.g. SIGPIPE, OOM), now properly handled with signal info in error messages +- **ssh**: Fixed MySQL/MongoDB SSH restore not consuming stdout, which could cause backpressure and hang/crash the remote process +- **restore**: Fixed MySQL SSH restore crashing the Node.js process with OOM (16 GB heap) when restoring large databases โ€” stderr log output is now rate-limited (max 50 messages, 500 chars each) to prevent unbounded memory growth +- **restore**: Fixed MySQL restore via SSH failing with "Server has gone away" on large dumps โ€” `mysql` client now uses `--max-allowed-packet=64M` to handle large legacy INSERT statements +- **restore**: Fixed MySQL SSH restore producing SQL syntax errors when piping large dumps directly through the SSH channel โ€” switched to upload-then-restore pattern (like PostgreSQL): dump file is uploaded to remote temp location first, then `mysql` reads from the local file on the remote server +- **backup**: Fixed MySQL dump producing huge INSERT statements that cause OOM kills on remote servers during restore โ€” `mysqldump` now uses `--net-buffer-length=16384` to limit each INSERT to ~16 KB, and `mysql` client `--max-allowed-packet` reduced from 
512M to 64M to minimize client memory allocation +- **restore**: Fixed MySQL SSH restore failing with "Server has gone away" on servers with limited RAM โ€” `mysql` client now uses `--init-command` to disable binary logging (`sql_log_bin=0`) and reduce flush I/O (`innodb_flush_log_at_trx_commit=2`) for the restore session, significantly reducing server memory and disk pressure +- **restore**: Fixed SSH restore file upload losing ~8.7% of data on large files (1.3 GB+) when piping through SSH2's `execStream` via `cat >` โ€” switched to SFTP protocol (`sftp.fastPut()`) for all SSH restore uploads (MySQL, PostgreSQL), which guarantees data integrity + +### ๐Ÿ—‘๏ธ Removed + +- **restore**: Removed session-level SET optimizations (`sql_log_bin=0`, `innodb_flush_log_at_trx_commit=2`) from MySQL restore โ€” the root cause of restore failures was data loss during SSH upload (now fixed by SFTP), not server resource pressure. `innodb_flush_log_at_trx_commit` also caused ERROR 1229 on MariaDB (GLOBAL-only variable). Users can still pass custom options via the source configuration if needed + +### ๐Ÿ”’ Security + +- **ssh**: Fixed database passwords (MYSQL_PWD, PGPASSWORD) being exposed in execution logs when a remote process is killed by OOM or signal โ€” `remoteEnv()` now uses `export` statements instead of inline env var prefix, and the MySQL stderr handler redacts known secrets from all output ### ๐ŸŽจ Improvements diff --git a/wiki/developer-guide/adapters/database.md b/wiki/developer-guide/adapters/database.md index 57ed0c3..5f2dacd 100644 --- a/wiki/developer-guide/adapters/database.md +++ b/wiki/developer-guide/adapters/database.md @@ -178,7 +178,7 @@ Configuration: `readyTimeout: 20000ms`, `keepaliveInterval: 10000ms`, `keepalive | Function | Purpose | | :--- | :--- | | `shellEscape(value)` | Wraps value in single quotes, escapes embedded quotes | -| `remoteEnv(vars, cmd)` | Prepends env vars to a command (e.g., `MYSQL_PWD='...' 
mysqldump`) | +| `remoteEnv(vars, cmd)` | Exports env vars before a command (e.g., `export MYSQL_PWD='...'; mysqldump`) โ€” uses `export` to prevent password leaking in OOM kill reports | | `remoteBinaryCheck(client, ...candidates)` | Checks if binary exists on remote host, returns resolved path | | `isSSHMode(config)` | Returns `true` if `config.connectionMode === "ssh"` | | `extractSshConfig(config)` | Extracts `SshConnectionConfig` from adapter config with `sshHost` prefix | From 5816c955255c1bfdb35913e7ccb38cfc0f22531f Mon Sep 17 00:00:00 2001 From: Manu Date: Sun, 29 Mar 2026 14:34:05 +0200 Subject: [PATCH 09/21] Use SFTP for SSH restores & verify uploads Switch SSH restore flow for MongoDB and SQLite from piping archives over exec stdin to uploading the file via SFTP and running the restore remotely. Add remote temp filenames (crypto.randomUUID), upload size verification, and remote temp-file cleanup. Remove local stream piping, run mongorestore/sqlite on the remote server using the uploaded file, and surface progress. Update imports and adjust logging/err handling accordingly; update changelog to reflect SFTP switch for MongoDB and SQLite. 
--- src/lib/adapters/database/mongodb/restore.ts | 34 ++++++-- src/lib/adapters/database/sqlite/restore.ts | 91 ++++++++++++-------- wiki/changelog.md | 2 +- 3 files changed, 82 insertions(+), 45 deletions(-) diff --git a/src/lib/adapters/database/mongodb/restore.ts b/src/lib/adapters/database/mongodb/restore.ts index 1ccfdc8..f8cfcf6 100644 --- a/src/lib/adapters/database/mongodb/restore.ts +++ b/src/lib/adapters/database/mongodb/restore.ts @@ -4,8 +4,10 @@ import { MongoClient } from "mongodb"; import { MongoDBConfig } from "@/lib/adapters/definitions"; import { spawn } from "child_process"; import { createReadStream } from "fs"; +import fs from "fs/promises"; import { waitForProcess } from "@/lib/adapters/process"; import path from "path"; +import { randomUUID } from "crypto"; import { isMultiDbTar, extractSelectedDatabases, @@ -179,7 +181,8 @@ async function restoreSingleDatabase( } /** - * SSH variant: pipe local archive to remote mongorestore via SSH stdin. + * SSH variant: upload archive via SFTP, then run mongorestore on the remote server. + * Uses SFTP protocol which guarantees data integrity (unlike piping through exec stdin). */ async function restoreSingleDatabaseSSH( sourcePath: string, @@ -192,11 +195,13 @@ async function restoreSingleDatabaseSSH( const ssh = new SshClient(); await ssh.connect(sshConfig); + const remoteTempFile = `/tmp/dbackup_mongorestore_${randomUUID()}.archive`; + try { const mongorestoreBin = await remoteBinaryCheck(ssh, "mongorestore"); const args = buildMongoArgs(config); - args.push("--archive"); // read from stdin + args.push(`--archive=${shellEscape(remoteTempFile)}`); args.push("--gzip"); args.push("--drop"); @@ -208,11 +213,27 @@ async function restoreSingleDatabaseSSH( args.push("--nsInclude", shellEscape(`${targetDb}.*`)); } + // 1. 
Upload archive to remote via SFTP + const totalSize = (await fs.stat(sourcePath)).size; + log(`Uploading archive to remote server via SFTP (${(totalSize / 1024 / 1024).toFixed(1)} MB)...`, 'info'); + await ssh.uploadFile(sourcePath, remoteTempFile); + + // Verify upload integrity + try { + const sizeCheck = await ssh.exec(`stat -c '%s' ${shellEscape(remoteTempFile)} 2>/dev/null || stat -f '%z' ${shellEscape(remoteTempFile)}`); + const remoteSize = parseInt(sizeCheck.stdout.trim(), 10); + if (remoteSize !== totalSize) { + throw new Error(`Upload size mismatch! Local: ${totalSize}, Remote: ${remoteSize}`); + } + log(`Upload verified: ${(remoteSize / 1024 / 1024).toFixed(1)} MB`); + } catch (e) { + if (e instanceof Error && e.message.includes('mismatch')) throw e; + } + + // 2. Run mongorestore on the remote server const cmd = `${mongorestoreBin} ${args.join(" ")}`; log(`Restoring database (SSH)`, 'info', 'command', `mongorestore ${args.join(' ').replace(config.password || '___NONE___', '******')}`); - const fileStream = createReadStream(sourcePath); - await new Promise((resolve, reject) => { ssh.execStream(cmd, (err, stream) => { if (err) return reject(err); @@ -230,12 +251,11 @@ async function restoreSingleDatabaseSSH( }); stream.on('error', (err: Error) => reject(err)); - fileStream.on('error', (err: Error) => reject(err)); - - fileStream.pipe(stream); }); }); } finally { + // Cleanup remote temp file + await ssh.exec(`rm -f ${shellEscape(remoteTempFile)}`).catch(() => {}); ssh.end(); } } diff --git a/src/lib/adapters/database/sqlite/restore.ts b/src/lib/adapters/database/sqlite/restore.ts index 242b8d7..7e25cfa 100644 --- a/src/lib/adapters/database/sqlite/restore.ts +++ b/src/lib/adapters/database/sqlite/restore.ts @@ -3,6 +3,7 @@ import { spawn } from "child_process"; import fs from "fs"; import { SshClient, shellEscape, extractSqliteSshConfig } from "@/lib/ssh"; import { SQLiteConfig } from "@/lib/adapters/definitions"; +import { randomUUID } from "crypto"; 
export const prepareRestore: DatabaseAdapter["prepareRestore"] = async (_config, _databases) => { // No major prep needed for SQLite mostly, but could check write permissions here @@ -116,52 +117,68 @@ async function restoreSsh(config: SQLiteConfig, sourcePath: string, log: (msg: s await client.connect(sshConfig); log("SSH connection established."); - // Create remote backup and delete original - log("Creating remote backup of existing DB and cleaning up..."); - const escapedPath = shellEscape(dbPath); - const backupCmd = `if [ -f ${escapedPath} ]; then cp ${escapedPath} ${escapedPath}.bak-$(date +%s); rm ${escapedPath}; echo "Backed up and removed old DB"; else echo "No existing DB"; fi`; - await client.exec(backupCmd); + const remoteTempFile = `/tmp/dbackup_sqlite_restore_${randomUUID()}.sql`; - return new Promise(async (resolve, reject) => { - const command = `${shellEscape(binaryPath)} ${escapedPath}`; - log(`Executing remote command: ${binaryPath} ${dbPath}`); - - client.execStream(command, async (err, stream) => { - if (err) { - client.end(); - return reject(err); + try { + // Create remote backup and delete original + log("Creating remote backup of existing DB and cleaning up..."); + const escapedPath = shellEscape(dbPath); + const backupCmd = `if [ -f ${escapedPath} ]; then cp ${escapedPath} ${escapedPath}.bak-$(date +%s); rm ${escapedPath}; echo "Backed up and removed old DB"; else echo "No existing DB"; fi`; + await client.exec(backupCmd); + + // 1. 
Upload SQL dump to remote via SFTP + const totalSize = (await fs.promises.stat(sourcePath)).size; + log(`Uploading dump to remote server via SFTP (${(totalSize / 1024 / 1024).toFixed(1)} MB)...`); + await client.uploadFile(sourcePath, remoteTempFile); + + // Verify upload integrity + try { + const sizeCheck = await client.exec(`stat -c '%s' ${shellEscape(remoteTempFile)} 2>/dev/null || stat -f '%z' ${shellEscape(remoteTempFile)}`); + const remoteSize = parseInt(sizeCheck.stdout.trim(), 10); + if (remoteSize !== totalSize) { + throw new Error(`Upload size mismatch! Local: ${totalSize}, Remote: ${remoteSize}`); } + log(`Upload verified: ${(remoteSize / 1024 / 1024).toFixed(1)} MB`); + } catch (e) { + if (e instanceof Error && e.message.includes('mismatch')) throw e; + } - // Setup generic read stream with progress - const totalSize = (await fs.promises.stat(sourcePath)).size; - let processed = 0; - const readStream = fs.createReadStream(sourcePath); + if (onProgress) onProgress(50); - if (onProgress) { - readStream.on('data', (chunk) => { - processed += chunk.length; - const percent = Math.round((processed / totalSize) * 100); - onProgress(percent); - }); - } + // 2. 
Run sqlite3 on the remote server with the uploaded file + const command = `${shellEscape(binaryPath)} ${escapedPath} < ${shellEscape(remoteTempFile)}`; + log(`Executing remote command: ${binaryPath} ${dbPath}`); - readStream.pipe(stream.stdin); + await new Promise((resolve, reject) => { + client.execStream(command, (err, stream) => { + if (err) return reject(err); - stream.stderr.on("data", (data: any) => { - log(`[Remote Stderr]: ${data.toString()}`); - }); + stream.on('data', () => {}); - stream.on("exit", (code: number, _signal: any) => { - client.end(); - if (code === 0) { - log("Remote restore completed successfully."); - resolve({ success: true }); - } else { - reject(new Error(`Remote process exited with code ${code}`)); - } + stream.stderr.on("data", (data: any) => { + log(`[Remote Stderr]: ${data.toString()}`); + }); + + stream.on("exit", (code: number | null, signal?: string) => { + if (code === 0) { + if (onProgress) onProgress(100); + log("Remote restore completed successfully."); + resolve(); + } else { + reject(new Error(`Remote process exited with code ${code ?? 'null'}${signal ? ` (signal: ${signal})` : ''}`)); + } + }); + + stream.on('error', (err: Error) => reject(err)); }); }); - }); + + return { success: true }; + } finally { + // Cleanup remote temp file + await client.exec(`rm -f ${shellEscape(remoteTempFile)}`).catch(() => {}); + client.end(); + } } diff --git a/wiki/changelog.md b/wiki/changelog.md index 3122cd5..e4438c6 100644 --- a/wiki/changelog.md +++ b/wiki/changelog.md @@ -25,7 +25,7 @@ All notable changes to DBackup are documented here. 
- **restore**: Fixed MySQL SSH restore producing SQL syntax errors when piping large dumps directly through the SSH channel โ€” switched to upload-then-restore pattern (like PostgreSQL): dump file is uploaded to remote temp location first, then `mysql` reads from the local file on the remote server - **backup**: Fixed MySQL dump producing huge INSERT statements that cause OOM kills on remote servers during restore โ€” `mysqldump` now uses `--net-buffer-length=16384` to limit each INSERT to ~16 KB, and `mysql` client `--max-allowed-packet` reduced from 512M to 64M to minimize client memory allocation - **restore**: Fixed MySQL SSH restore failing with "Server has gone away" on servers with limited RAM โ€” `mysql` client now uses `--init-command` to disable binary logging (`sql_log_bin=0`) and reduce flush I/O (`innodb_flush_log_at_trx_commit=2`) for the restore session, significantly reducing server memory and disk pressure -- **restore**: Fixed SSH restore file upload losing ~8.7% of data on large files (1.3 GB+) when piping through SSH2's `execStream` via `cat >` โ€” switched to SFTP protocol (`sftp.fastPut()`) for all SSH restore uploads (MySQL, PostgreSQL), which guarantees data integrity +- **restore**: Fixed SSH restore file upload losing ~8.7% of data on large files (1.3 GB+) when piping through SSH2's `execStream` via `cat >` โ€” switched to SFTP protocol (`sftp.fastPut()`) for all SSH restore uploads (MySQL, PostgreSQL, MongoDB, SQLite), which guarantees data integrity with upload size verification ### ๐Ÿ—‘๏ธ Removed From 37f40e5dc2d8ab6a75f662ffbaee3dca242065b0 Mon Sep 17 00:00:00 2001 From: Manu Date: Sun, 29 Mar 2026 14:46:38 +0200 Subject: [PATCH 10/21] Add SQLite SSH test UI and SSH fixes Add a "Test SSH Connection" button and handler to the SQLite adapter UI that posts mapped SQLite config to the generic /api/adapters/test-ssh endpoint and shows progress/toasts. 
Refactor SQLite SSH logic to use shared SSH utilities: extractSqliteSshConfig and remoteBinaryCheck, ensure SSH client connections are always closed with try/finally, and validate SSH config early. Improve getDatabasesWithStats to reuse extracted ssh config and finalize client cleanup. Fix remote dump exit handling to account for nullable exit codes and include signal info in error messages. Clean up unused ssh2 types import and update changelog entries to reflect these changes for reliability and unified SSH behavior across adapters. --- src/components/adapter/form-sections.tsx | 51 +++++++++++++++++- .../adapters/database/sqlite/connection.ts | 52 +++++++++---------- src/lib/adapters/database/sqlite/dump.ts | 4 +- src/lib/ssh/ssh-client.ts | 2 +- wiki/changelog.md | 13 ++--- 5 files changed, 80 insertions(+), 42 deletions(-) diff --git a/src/components/adapter/form-sections.tsx b/src/components/adapter/form-sections.tsx index 54b4ad5..656e1f3 100644 --- a/src/components/adapter/form-sections.tsx +++ b/src/components/adapter/form-sections.tsx @@ -79,9 +79,46 @@ export function DatabaseFormContent({ healthNotificationsDisabled, onHealthNotificationsDisabledChange, }: SectionProps) { - const { watch } = useFormContext(); + const { watch, getValues } = useFormContext(); const mode = watch("config.mode"); const authType = watch("config.authType"); + const [isTestingSqliteSsh, setIsTestingSqliteSsh] = useState(false); + + const testSqliteSshConnection = async () => { + setIsTestingSqliteSsh(true); + const toastId = toast.loading("Testing SSH connection..."); + try { + const config = getValues("config"); + // Map SQLite field names to the generic SSH field names expected by the API + const mappedConfig = { + ...config, + sshHost: config.host, + sshPort: config.port, + sshUsername: config.username, + sshAuthType: config.authType, + sshPassword: config.password, + sshPrivateKey: config.privateKey, + sshPassphrase: config.passphrase, + }; + const res = await 
fetch("/api/adapters/test-ssh", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ config: mappedConfig }), + }); + const result = await res.json(); + toast.dismiss(toastId); + if (result.success) { + toast.success(result.message || "SSH connection successful"); + } else { + toast.error(result.message || "SSH connection failed"); + } + } catch { + toast.dismiss(toastId); + toast.error("Failed to test SSH connection"); + } finally { + setIsTestingSqliteSsh(false); + } + }; if (adapter.id === "sqlite") { if (!mode) return null; @@ -139,6 +176,18 @@ export function DatabaseFormContent({ {authType === 'privateKey' && ( )} +
+ +
diff --git a/src/lib/adapters/database/sqlite/connection.ts b/src/lib/adapters/database/sqlite/connection.ts index 9ccd439..e147dc3 100644 --- a/src/lib/adapters/database/sqlite/connection.ts +++ b/src/lib/adapters/database/sqlite/connection.ts @@ -3,7 +3,7 @@ import fs from "fs/promises"; import { constants } from "fs"; import { execFile } from "child_process"; import { promisify } from "util"; -import { SshClient, shellEscape, extractSqliteSshConfig } from "@/lib/ssh"; +import { SshClient, shellEscape, extractSqliteSshConfig, remoteBinaryCheck } from "@/lib/ssh"; const execFileAsync = promisify(execFile); @@ -30,35 +30,30 @@ export const test: DatabaseAdapter["test"] = async (config) => { } } else if (mode === "ssh") { + const sshConfig = extractSqliteSshConfig(config); + if (!sshConfig) return { success: false, message: "SSH host and username are required" }; + const client = new SshClient(); try { - await client.connect(config); - - // 1. Check if sqlite3 binary exists on remote - const binaryResult = await client.exec(`${shellEscape(binaryPath)} --version`); - if (binaryResult.code !== 0) { - client.end(); - return { success: false, message: `Remote SQLite3 binary check failed: ${binaryResult.stderr || "Command failed"}` }; - } - const version = binaryResult.stdout.split(' ')[0].trim(); - - // 2. Check if database file exists on remote (using stat) - // We use a simple test: sqlite3 [path] "SELECT 1;" - // Or just `test -f [path]` - - const fileCheck = await client.exec(`test -f ${shellEscape(dbPath)} && echo "exists"`); - if (!fileCheck.stdout.includes("exists")) { - client.end(); - return { success: false, message: `Remote database file at '${dbPath}' not found.` }; - } + await client.connect(sshConfig); + + // 1. 
Check if sqlite3 binary exists on remote + const resolvedBinary = await remoteBinaryCheck(client, binaryPath); + const versionResult = await client.exec(`${shellEscape(resolvedBinary)} --version`); + const version = versionResult.stdout.split(' ')[0].trim(); - client.end(); - return { success: true, message: "Remote SSH SQLite connection successful.", version }; + // 2. Check if database file exists on remote + const fileCheck = await client.exec(`test -f ${shellEscape(dbPath)} && echo "exists"`); + if (!fileCheck.stdout.includes("exists")) { + return { success: false, message: `Remote database file at '${dbPath}' not found.` }; + } + return { success: true, message: "Remote SSH SQLite connection successful.", version }; } catch (err: unknown) { - client.end(); const message = err instanceof Error ? err.message : String(err); return { success: false, message: `SSH Connection failed: ${message}` }; + } finally { + client.end(); } } @@ -102,11 +97,12 @@ export const getDatabasesWithStats: DatabaseAdapter["getDatabasesWithStats"] = a // Table count is optional, ignore errors } } else if (mode === "ssh") { + const sshConfig = extractSqliteSshConfig(config); + if (!sshConfig) return [{ name, sizeInBytes: undefined, tableCount: undefined }]; + const client = new SshClient(); try { - const sshConfig2 = extractSqliteSshConfig(config); - if (!sshConfig2) return [{ name, sizeInBytes: undefined, tableCount: undefined }]; - await client.connect(sshConfig2); + await client.connect(sshConfig); // Get file size via stat const sizeResult = await client.exec(`stat -c %s ${shellEscape(dbPath)} 2>/dev/null || stat -f %z ${shellEscape(dbPath)} 2>/dev/null`); @@ -127,9 +123,9 @@ export const getDatabasesWithStats: DatabaseAdapter["getDatabasesWithStats"] = a } catch { // Table count is optional } - - client.end(); } catch { + // If stats fail, return name only + } finally { client.end(); } } diff --git a/src/lib/adapters/database/sqlite/dump.ts 
b/src/lib/adapters/database/sqlite/dump.ts index 0b709ea..5568a7c 100644 --- a/src/lib/adapters/database/sqlite/dump.ts +++ b/src/lib/adapters/database/sqlite/dump.ts @@ -109,7 +109,7 @@ async function dumpSsh(config: SQLiteConfig, destinationPath: string, log: (msg: log(`[Remote Stderr]: ${data.toString()}`); }); - stream.on("exit", (code: number, _signal: any) => { + stream.on("exit", (code: number | null, signal?: string) => { client.end(); if (code === 0) { log("Remote dump completed successfully."); @@ -118,7 +118,7 @@ async function dumpSsh(config: SQLiteConfig, destinationPath: string, log: (msg: else resolve({ success: true, size: stats.size, path: destinationPath }); }); } else { - reject(new Error(`Remote process exited with code ${code}`)); + reject(new Error(`Remote process exited with code ${code ?? 'null'}${signal ? ` (signal: ${signal})` : ''}`)); } }); }); diff --git a/src/lib/ssh/ssh-client.ts b/src/lib/ssh/ssh-client.ts index 9dc809c..04f29da 100644 --- a/src/lib/ssh/ssh-client.ts +++ b/src/lib/ssh/ssh-client.ts @@ -1,4 +1,4 @@ -import { Client, ConnectConfig, SFTPWrapper } from "ssh2"; +import { Client, ConnectConfig } from "ssh2"; /** * Generic SSH connection configuration used across all adapters. diff --git a/wiki/changelog.md b/wiki/changelog.md index e4438c6..5fc1a86 100644 --- a/wiki/changelog.md +++ b/wiki/changelog.md @@ -11,25 +11,17 @@ All notable changes to DBackup are documented here. 
- **ssh**: New shared SSH infrastructure (`src/lib/ssh/`) with reusable client, shell escaping, remote binary detection, and per-adapter argument builders - **ssh**: Generic SSH connection test endpoint - "Test SSH" button now works for all SSH-capable adapters, not just MSSQL - **ui**: SSH configuration tab in the source editor for all SSH-capable database adapters (MySQL, MariaDB, PostgreSQL, MongoDB, Redis) with connection mode selector +- **sqlite**: Added "Test SSH Connection" button to the SQLite SSH configuration tab, matching all other SSH-capable adapters ### ๐Ÿ› Bug Fixes - **mysql**: Backup jobs with no database selected now auto-discover all databases instead of failing with "No database specified" - **postgres**: Backup jobs with no database selected now auto-discover all databases instead of `pg_dump` defaulting to the username as database name - **restore**: Restore page no longer shows SQLite-style "Overwrite / Restore as New" UI for server-based adapters โ€” now shows a target database name input when database names are unknown, and auto-discovers database names in backup metadata for future backups -- **ui**: Fixed adapter icon being pushed out of select buttons when source or destination name is too long โ€” names now truncate properly -- **ssh**: Fixed SSH exit code handling across all adapters โ€” `code` can be `null` when process is killed by signal (e.g. 
SIGPIPE, OOM), now properly handled with signal info in error messages - **ssh**: Fixed MySQL/MongoDB SSH restore not consuming stdout, which could cause backpressure and hang/crash the remote process - **restore**: Fixed MySQL SSH restore crashing the Node.js process with OOM (16 GB heap) when restoring large databases โ€” stderr log output is now rate-limited (max 50 messages, 500 chars each) to prevent unbounded memory growth - **restore**: Fixed MySQL restore via SSH failing with "Server has gone away" on large dumps โ€” `mysql` client now uses `--max-allowed-packet=64M` to handle large legacy INSERT statements -- **restore**: Fixed MySQL SSH restore producing SQL syntax errors when piping large dumps directly through the SSH channel โ€” switched to upload-then-restore pattern (like PostgreSQL): dump file is uploaded to remote temp location first, then `mysql` reads from the local file on the remote server -- **backup**: Fixed MySQL dump producing huge INSERT statements that cause OOM kills on remote servers during restore โ€” `mysqldump` now uses `--net-buffer-length=16384` to limit each INSERT to ~16 KB, and `mysql` client `--max-allowed-packet` reduced from 512M to 64M to minimize client memory allocation -- **restore**: Fixed MySQL SSH restore failing with "Server has gone away" on servers with limited RAM โ€” `mysql` client now uses `--init-command` to disable binary logging (`sql_log_bin=0`) and reduce flush I/O (`innodb_flush_log_at_trx_commit=2`) for the restore session, significantly reducing server memory and disk pressure -- **restore**: Fixed SSH restore file upload losing ~8.7% of data on large files (1.3 GB+) when piping through SSH2's `execStream` via `cat >` โ€” switched to SFTP protocol (`sftp.fastPut()`) for all SSH restore uploads (MySQL, PostgreSQL, MongoDB, SQLite), which guarantees data integrity with upload size verification - -### ๐Ÿ—‘๏ธ Removed - -- **restore**: Removed session-level SET optimizations (`sql_log_bin=0`, 
`innodb_flush_log_at_trx_commit=2`) from MySQL restore โ€” the root cause of restore failures was data loss during SSH upload (now fixed by SFTP), not server resource pressure. `innodb_flush_log_at_trx_commit` also caused ERROR 1229 on MariaDB (GLOBAL-only variable). Users can still pass custom options via the source configuration if needed
+- **backup**: Fixed MySQL dump producing huge INSERT statements that cause OOM kills on remote servers during restore โ€” `mysqldump` now uses `--net-buffer-length=16384` to limit each INSERT to ~16 KB, and `mysql` client `--max-allowed-packet` reduced from 512M to 64M to minimize client memory allocation
 
 ### ๐Ÿ”’ Security
 
@@ -40,6 +32,7 @@ All notable changes to DBackup are documented here.
 - **ui**: Redesigned source form for SSH-capable adapters โ€” Connection Mode selector now appears first (like SQLite), SSH Connection tab is shown first in SSH mode so users configure SSH before database credentials
 - **ui**: Sources and Destinations pages now auto-refresh every 10 seconds to keep health status up to date
 - **sqlite**: Refactored SQLite SSH client into shared SSH module for code reuse across all database adapters
+- **sqlite**: SQLite SSH connection test now uses `remoteBinaryCheck()` from the shared SSH library instead of manual binary checks; `try/finally` pattern ensures SSH connections are always closed; exit code null handling fixed in dump
 
 ### ๐Ÿ“ Documentation
 

From 2f739215a4e20390022f22b5cf114ff7ef69e0fa Mon Sep 17 00:00:00 2001
From: Manu
Date: Sun, 29 Mar 2026 16:24:52 +0200
Subject: [PATCH 11/21] MongoDB: auto-discover DBs and robust SSH parsing

Add robust SSH handling and auto-discovery for MongoDB adapters.
Use a dedicated logger child for MongoDB connection operations and
redact inline passwords in debug logs.
Switch mongosh --eval to single-quoted print(JSON.stringify(...)) invocations to avoid bash history expansion and produce machine-friendly JSON output; parse JSON lines from stdout (with a line-based fallback) and include stderr/stdout in error messages. Import getDatabases into the MongoDB dump flow and auto-discover databases when none are selected (with graceful warning on failure). Update the dump runner step to attempt fetching DB names for metadata when an empty selection is provided. Also update changelog entries to reflect the new auto-discovery and mongosh quoting fix. --- .../adapters/database/mongodb/connection.ts | 71 +++++++++++++++---- src/lib/adapters/database/mongodb/dump.ts | 14 ++++ src/lib/runner/steps/02-dump.ts | 58 +++++++++++++-- wiki/changelog.md | 15 ++-- 4 files changed, 130 insertions(+), 28 deletions(-) diff --git a/src/lib/adapters/database/mongodb/connection.ts b/src/lib/adapters/database/mongodb/connection.ts index 75abf99..2f1d708 100644 --- a/src/lib/adapters/database/mongodb/connection.ts +++ b/src/lib/adapters/database/mongodb/connection.ts @@ -8,6 +8,9 @@ import { remoteEnv, remoteBinaryCheck, } from "@/lib/ssh"; +import { logger } from "@/lib/logger"; + +const log = logger.child({ service: "mongodb-connection" }); /** * Build MongoDB connection URI from config @@ -35,7 +38,7 @@ export async function test(config: MongoDBConfig): Promise<{ success: boolean; m const mongoshBin = await remoteBinaryCheck(ssh, "mongosh", "mongo"); const args = buildMongoArgs(config); - const cmd = `${mongoshBin} ${args.join(" ")} --quiet --eval "db.adminCommand({buildInfo:1}).version"`; + const cmd = `${mongoshBin} ${args.join(" ")} --quiet --eval 'print(db.adminCommand({buildInfo:1}).version)'`; const result = await ssh.exec(cmd); if (result.code === 0) { @@ -92,13 +95,32 @@ export async function getDatabases(config: MongoDBConfig): Promise { const mongoshBin = await remoteBinaryCheck(ssh, "mongosh", "mongo"); const args = buildMongoArgs(config); - 
const cmd = `${mongoshBin} ${args.join(" ")} --quiet --eval "db.adminCommand({listDatabases:1}).databases.map(d=>d.name).join('\\n')"`; + // Output JSON array of DB names โ€” single print(), parsed in Node + const cmd = `${mongoshBin} ${args.join(" ")} --quiet --eval 'print(JSON.stringify(db.adminCommand({listDatabases:1}).databases.map(function(d){return d.name})))'`; + log.debug("getDatabases SSH command", { cmd: cmd.replace(/--password\s+'[^']*'/, "--password '***'") }); const result = await ssh.exec(cmd); + log.debug("getDatabases SSH result", { + code: result.code, + stdout: result.stdout.substring(0, 500), + stderr: result.stderr.substring(0, 500), + }); + if (result.code !== 0) { - throw new Error(`Failed to list databases: ${result.stderr}`); + throw new Error(`Failed to list databases (code ${result.code}): ${result.stderr || result.stdout}`); + } + + // Parse JSON array from stdout โ€” find the line that looks like a JSON array + const lines = result.stdout.split('\n').map(s => s.trim()).filter(Boolean); + const jsonLine = lines.find(l => l.startsWith('[')); + + if (jsonLine) { + const allNames: string[] = JSON.parse(jsonLine); + return allNames.filter(n => !sysDbs.includes(n)); } - return result.stdout.split('\n').map(s => s.trim()).filter(s => s && !sysDbs.includes(s)); + + // Fallback: treat each non-empty line as a DB name + return lines.filter(s => s && !sysDbs.includes(s)); } finally { ssh.end(); } @@ -144,18 +166,40 @@ export async function getDatabasesWithStats(config: MongoDBConfig): Promise!['admin','config','local'].includes(d.name)).forEach(d=>{let c=0;try{c=db.getSiblingDB(d.name).getCollectionNames().length}catch(e){}print(d.name+'\\t'+(d.sizeOnDisk||0)+'\\t'+c)})`; - const cmd = `${mongoshBin} ${args.join(" ")} --quiet --eval "${script}"`; + // Output JSON array with stats โ€” single print(), parsed in Node + // All filtering done in Node to avoid quoting issues in shell + const script = `var r=db.adminCommand({listDatabases:1});var 
out=[];r.databases.forEach(function(d){var c=0;try{c=db.getSiblingDB(d.name).getCollectionNames().length}catch(e){}out.push({name:d.name,size:Number(d.sizeOnDisk)||0,tables:c})});print(JSON.stringify(out))`; + const cmd = `${mongoshBin} ${args.join(" ")} --quiet --eval '${script}'`; + log.debug("getDatabasesWithStats SSH command", { cmd: cmd.replace(/--password\s+'[^']*'/, "--password '***'") }); const result = await ssh.exec(cmd); + log.debug("getDatabasesWithStats SSH result", { + code: result.code, + stdout: result.stdout.substring(0, 500), + stderr: result.stderr.substring(0, 500), + }); + if (result.code !== 0) { - throw new Error(`Failed to get database stats: ${result.stderr}`); + throw new Error(`Failed to get database stats (code ${result.code}): ${result.stderr || result.stdout}`); } - return result.stdout - .split('\n') - .map(line => line.trim()) - .filter(line => line) + + // Parse JSON array from stdout + const lines = result.stdout.split('\n').map(s => s.trim()).filter(Boolean); + const jsonLine = lines.find(l => l.startsWith('[')); + + if (jsonLine) { + const parsed: Array<{ name: string; size: number; tables: number }> = JSON.parse(jsonLine); + return parsed + .filter(d => !sysDbs.includes(d.name)) + .map(d => ({ + name: d.name, + sizeInBytes: d.size, + tableCount: d.tables, + })); + } + + // Fallback: tab-separated parsing + return lines .map(line => { const [name, sizeStr, tableStr] = line.split('\t'); return { @@ -163,7 +207,8 @@ export async function getDatabasesWithStats(config: MongoDBConfig): Promise d.name && !["admin", "config", "local"].includes(d.name)); } finally { ssh.end(); } diff --git a/src/lib/adapters/database/mongodb/dump.ts b/src/lib/adapters/database/mongodb/dump.ts index abf9421..e35c0d4 100644 --- a/src/lib/adapters/database/mongodb/dump.ts +++ b/src/lib/adapters/database/mongodb/dump.ts @@ -13,6 +13,7 @@ import { } from "../common/tar-utils"; import { TarFileEntry, TarManifest } from "../common/types"; import { MongoDBConfig 
} from "@/lib/adapters/definitions"; +import { getDatabases } from "./connection"; import { SshClient, isSSHMode, @@ -180,6 +181,19 @@ export async function dump( if (db) dbs = [db]; } + // Discover all databases if none selected (same pattern as MySQL adapter) + if (dbs.length === 0) { + log("No databases selected โ€” backing up all databases"); + try { + dbs = await getDatabases(config); + log(`Found ${dbs.length} database(s): ${dbs.join(', ')}`); + } catch (e: unknown) { + const message = e instanceof Error ? e.message : String(e); + log(`Warning: Could not fetch database list: ${message}`, 'warning'); + // Continue anyway โ€” mongodump without --db dumps all databases + } + } + const dialect = getDialect('mongodb', config.detectedVersion); // Case 1: Single Database or ALL - Direct archive dump diff --git a/src/lib/runner/steps/02-dump.ts b/src/lib/runner/steps/02-dump.ts index f37b83a..1ec3b13 100644 --- a/src/lib/runner/steps/02-dump.ts +++ b/src/lib/runner/steps/02-dump.ts @@ -71,9 +71,27 @@ export async function stepExecuteDump(ctx: RunnerContext) { } } } else if (Array.isArray(dbVal)) { - names = dbVal; - label = `${dbVal.length} DBs`; - count = dbVal.length; + names = dbVal.filter((s: string) => s && s.trim().length > 0); + if (names.length > 0) { + label = `${names.length} DBs`; + count = names.length; + } else { + // Empty array = no DB selected, try to discover all databases + label = 'All DBs'; + if (sourceAdapter.getDatabases) { + try { + const fetched = await sourceAdapter.getDatabases(sourceConfig); + if (fetched && fetched.length > 0) { + names = fetched; + count = names.length; + label = `${names.length} DBs (fetched)`; + } + } catch (e: unknown) { + const message = e instanceof Error ? 
e.message : String(e); + ctx.log(`Warning: Could not fetch DB list for metadata: ${message}`); + } + } + } } else if (typeof dbVal === 'string') { if (dbVal.includes(',')) { names = dbVal.split(',').map((s: string) => s.trim()).filter((s: string) => s.length > 0); @@ -84,13 +102,39 @@ export async function stepExecuteDump(ctx: RunnerContext) { label = 'Single DB'; count = 1; } else { - label = 'No DB selected'; - count = 0; + // Empty string = no DB selected, try to discover all databases + label = 'All DBs'; + if (sourceAdapter.getDatabases) { + try { + const fetched = await sourceAdapter.getDatabases(sourceConfig); + if (fetched && fetched.length > 0) { + names = fetched; + count = names.length; + label = `${names.length} DBs (fetched)`; + } + } catch (e: unknown) { + const message = e instanceof Error ? e.message : String(e); + ctx.log(`Warning: Could not fetch DB list for metadata: ${message}`); + } + } } } else { // dbVal is undefined/null (e.g. MongoDB with no specific DB selected) - label = 'No DB selected'; - count = 0; + // Try to fetch DB names for accurate metadata (adapter dumps all DBs by default) + label = 'All DBs'; + if (sourceAdapter.getDatabases) { + try { + const fetched = await sourceAdapter.getDatabases(sourceConfig); + if (fetched && fetched.length > 0) { + names = fetched; + count = names.length; + label = `${names.length} DBs (fetched)`; + } + } catch (e: unknown) { + const message = e instanceof Error ? e.message : String(e); + ctx.log(`Warning: Could not fetch DB list for metadata: ${message}`); + } + } } // Fetch engine version and edition diff --git a/wiki/changelog.md b/wiki/changelog.md index 5fc1a86..9beb8a7 100644 --- a/wiki/changelog.md +++ b/wiki/changelog.md @@ -15,21 +15,20 @@ All notable changes to DBackup are documented here. 
### ๐Ÿ› Bug Fixes -- **mysql**: Backup jobs with no database selected now auto-discover all databases instead of failing with "No database specified" -- **postgres**: Backup jobs with no database selected now auto-discover all databases instead of `pg_dump` defaulting to the username as database name -- **restore**: Restore page no longer shows SQLite-style "Overwrite / Restore as New" UI for server-based adapters โ€” now shows a target database name input when database names are unknown, and auto-discovers database names in backup metadata for future backups +- **backup**: MySQL, PostgreSQL, and MongoDB backup jobs with no database selected now auto-discover all databases at runtime - MySQL no longer fails with "No database specified", PostgreSQL no longer defaults to the username as database name, and MongoDB SSH listing was fixed by switching `mongosh --eval` to single quotes to prevent bash `!` history expansion from silently corrupting the command; backup metadata is now correctly populated for restore mapping. 
+- **restore**: Restore page no longer shows SQLite-style "Overwrite / Restore as New" UI for server-based adapters - now shows a target database name input when database names are unknown, and auto-discovers database names in backup metadata for future backups
 - **ssh**: Fixed MySQL/MongoDB SSH restore not consuming stdout, which could cause backpressure and hang/crash the remote process
-- **restore**: Fixed MySQL SSH restore crashing the Node.js process with OOM (16 GB heap) when restoring large databases โ€” stderr log output is now rate-limited (max 50 messages, 500 chars each) to prevent unbounded memory growth
-- **restore**: Fixed MySQL restore via SSH failing with "Server has gone away" on large dumps โ€” `mysql` client now uses `--max-allowed-packet=64M` to handle large legacy INSERT statements
-- **backup**: Fixed MySQL dump producing huge INSERT statements that cause OOM kills on remote servers during restore โ€” `mysqldump` now uses `--net-buffer-length=16384` to limit each INSERT to ~16 KB, and `mysql` client `--max-allowed-packet` reduced from 512M to 64M to minimize client memory allocation
+- **restore**: Fixed MySQL SSH restore crashing the Node.js process with OOM (16 GB heap) when restoring large databases - stderr log output is now rate-limited (max 50 messages, 500 chars each) to prevent unbounded memory growth
+- **restore**: Fixed MySQL restore via SSH failing with "Server has gone away" on large dumps - `mysql` client now uses `--max-allowed-packet=64M` to handle large legacy INSERT statements
+- **backup**: Fixed MySQL dump producing huge INSERT statements that cause OOM kills on remote servers during restore - `mysqldump` now uses `--net-buffer-length=16384` to limit each INSERT to ~16 KB, and `mysql` client `--max-allowed-packet` reduced from 512M to 64M to minimize client memory allocation
 
 ### ๐Ÿ”’ Security
 
-- **ssh**: Fixed database passwords (MYSQL_PWD, PGPASSWORD) being exposed in execution logs when a remote process is killed by
OOM or signal โ€” `remoteEnv()` now uses `export` statements instead of inline env var prefix, and the MySQL stderr handler redacts known secrets from all output +- **ssh**: Fixed database passwords (MYSQL_PWD, PGPASSWORD) being exposed in execution logs when a remote process is killed by OOM or signal - `remoteEnv()` now uses `export` statements instead of inline env var prefix, and the MySQL stderr handler redacts known secrets from all output ### ๐ŸŽจ Improvements -- **ui**: Redesigned source form for SSH-capable adapters โ€” Connection Mode selector now appears first (like SQLite), SSH Connection tab is shown first in SSH mode so users configure SSH before database credentials +- **ui**: Redesigned source form for SSH-capable adapters - Connection Mode selector now appears first (like SQLite), SSH Connection tab is shown first in SSH mode so users configure SSH before database credentials - **ui**: Sources and Destinations pages now auto-refresh every 10 seconds to keep health status up to date - **sqlite**: Refactored SQLite SSH client into shared SSH module for code reuse across all database adapters - **sqlite**: SQLite SSH connection test now uses `remoteBinaryCheck()` from the shared SSH library instead of manual binary checks; `try/finally` pattern ensures SSH connections are always closed; exit code null handling fixed in dump From 2227275c91b474ce57c71a2ed09857a90b9a6c00 Mon Sep 17 00:00:00 2001 From: Manu Date: Sun, 29 Mar 2026 16:33:48 +0200 Subject: [PATCH 12/21] Fix download link modal viewport overflow Constrain the Download Link modal height and make its body scrollable to prevent the dialog from overflowing the viewport when a link is generated. Added max-h and overflow handling to DialogContent and wrapped the modal content in an overflow-y-auto container so file info, mode selection and generated URL can scroll independently. Also updated the changelog with a brief UI fix note. 
--- .../dashboard/storage/download-link-modal.tsx | 120 +++++++++--------- wiki/changelog.md | 1 + 2 files changed, 62 insertions(+), 59 deletions(-) diff --git a/src/components/dashboard/storage/download-link-modal.tsx b/src/components/dashboard/storage/download-link-modal.tsx index 55f5e2e..8d18b66 100644 --- a/src/components/dashboard/storage/download-link-modal.tsx +++ b/src/components/dashboard/storage/download-link-modal.tsx @@ -137,7 +137,7 @@ export function DownloadLinkModal({ return ( - + @@ -148,69 +148,70 @@ export function DownloadLinkModal({ - {/* File Info */} -
-
- -
-
-

- {file.name} -

-
- - {formatBytes(file.size)} - - {file.isEncrypted && ( - - - Encrypted - - )} +
+ {/* File Info */} +
+
+ +
+
+

+ {file.name} +

+
+ + {formatBytes(file.size)} + + {file.isEncrypted && ( + + + Encrypted + + )} +
-
- {/* Mode Selection (only for encrypted files) */} - {file.isEncrypted && ( -
- - { - setMode(v as DownloadMode); - setDownloadUrl(null); // Reset URL when mode changes - }} - className="grid grid-cols-2 gap-3" - > - - - -
- )} + + + +
+ )} - {/* Generate / URL Display */} - {!downloadUrl ? ( + {/* Generate / URL Display */} + {!downloadUrl ? (
)} +
); diff --git a/wiki/changelog.md b/wiki/changelog.md index 9beb8a7..f4b9161 100644 --- a/wiki/changelog.md +++ b/wiki/changelog.md @@ -21,6 +21,7 @@ All notable changes to DBackup are documented here. - **restore**: Fixed MySQL SSH restore crashing the Node.js process with OOM (16 GB heap) when restoring large databases - stderr log output is now rate-limited (max 50 messages, 500 chars each) to prevent unbounded memory growth - **restore**: Fixed MySQL restore via SSH failing with "Server has gone away" on large dumps - `mysql` client now uses `--max-allowed-packet=64M` to handle large legacy INSERT statements - **backup**: Fixed MySQL dump producing huge INSERT statements that cause OOM kills on remote servers during restore - `mysqldump` now uses `--net-buffer-length=16384` to limit each INSERT to ~16 KB, and `mysql` client `--max-allowed-packet` reduced from 512M to 64M to minimize client memory allocatione +- **ui**: Fixed Download Link modal overflowing the viewport when a link is generated - dialog now has a max height and scrollable body ### ๐Ÿ”’ Security From 90452a4981a1d6b9ab6bf96d8b8ec214137dd8e7 Mon Sep 17 00:00:00 2001 From: Manu Date: Sun, 29 Mar 2026 16:37:44 +0200 Subject: [PATCH 13/21] Add Beta badge to SSH select option Render a small "Beta" badge next to the "SSH" option in the select menu to indicate it's experimental. This is a presentational change in src/components/adapter/schema-field.tsx that replaces the plain "ssh" label with an inline span including styling for the badge; no behavioral changes were made. 
--- src/components/adapter/schema-field.tsx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/components/adapter/schema-field.tsx b/src/components/adapter/schema-field.tsx index 7e9a907..79bb7e1 100644 --- a/src/components/adapter/schema-field.tsx +++ b/src/components/adapter/schema-field.tsx @@ -173,7 +173,9 @@ export function SchemaField({ {((unwrappedShape as any).options || (unwrappedShape as any)._def?.values || []).map((val: string) => ( - {val === "none" ? "None (Insecure)" : val === "ssl" ? "SSL / TLS" : val === "starttls" ? "STARTTLS" : val === "ssh" ? "SSH" : val} + {val === "none" ? "None (Insecure)" : val === "ssl" ? "SSL / TLS" : val === "starttls" ? "STARTTLS" : val === "ssh" ? ( + SSH Beta + ) : val} ))} From 1ad55ae5e4453dba4d54c999f715b0325375c6b8 Mon Sep 17 00:00:00 2001 From: Manu Date: Sun, 29 Mar 2026 16:46:10 +0200 Subject: [PATCH 14/21] Show skeleton while loading target DBs Show skeleton placeholders and loading text on the Restore page while target databases are being fetched. The restore UI now renders a skeleton row during target DB loading, shows a "Loading target databases..." label in the analysis card, and adds an extra skeleton line. The Restore button is also disabled while target DBs are loading or analysis is running to prevent premature actions. Updated changelog to document the UX improvement. 
--- .../dashboard/storage/restore/restore-client.tsx | 13 ++++++++++--- wiki/changelog.md | 1 + 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/src/app/dashboard/storage/restore/restore-client.tsx b/src/app/dashboard/storage/restore/restore-client.tsx index e3c222e..8892cf5 100644 --- a/src/app/dashboard/storage/restore/restore-client.tsx +++ b/src/app/dashboard/storage/restore/restore-client.tsx @@ -539,6 +539,10 @@ export function RestoreClient() { {/* Version Compatibility Check */} + {targetSource && isLoadingTargetDbs && ( + + )} + {targetSource && !isLoadingTargetDbs && targetServerVersion && compatibilityIssues.length === 0 && file?.engineVersion && (
@@ -590,12 +594,15 @@ export function RestoreClient() {
- {isAnalyzing ? ( + {(isAnalyzing || isLoadingTargetDbs) ? (
- +
+
) : analyzedDbs.length > 0 ? ( @@ -886,7 +893,7 @@ export function RestoreClient() { ) : ( + {/* Type badge + connection mode selector */} +
+
+ Type +
+ + {selectedAdapter.name} + + +
+ {/* Mode selector for SQLite */} + {selectedAdapter.id === "sqlite" && ( +
+ >).shape.mode} + adapterId="sqlite" + /> +
+ )} + {/* Connection mode selector for SSH-capable adapters */} + {selectedAdapter.id !== "sqlite" && (selectedAdapter.configSchema as z.ZodObject>).shape?.connectionMode && ( +
+ >).shape.connectionMode} + adapterId={selectedAdapter.id} + /> +
+ )}
- {/* Mode selector for SQLite */} - {selectedAdapter.id === "sqlite" && ( - >).shape.mode} - adapterId="sqlite" - /> - )} - {/* Dynamic form content */} Date: Sun, 29 Mar 2026 17:50:14 +0200 Subject: [PATCH 20/21] Changelog: v1.3.0 released (SSH Remote Exec) Update wiki/changelog.md to mark v1.3.0 as released on March 29, 2026 and add the "SSH Remote Execution" subtitle. Changes release status from "In Progress" to the release date. --- wiki/changelog.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/wiki/changelog.md b/wiki/changelog.md index bcc8bd8..baa770d 100644 --- a/wiki/changelog.md +++ b/wiki/changelog.md @@ -2,8 +2,8 @@ All notable changes to DBackup are documented here. -## v1.3.0 -*Release: In Progress* +## v1.3.0 - SSH Remote Execution +*Released: March 29, 2026* ### โœจ Features From 740824b3cd938c75a31117be469df185c5ace4dd Mon Sep 17 00:00:00 2001 From: Manu Date: Sun, 29 Mar 2026 17:51:36 +0200 Subject: [PATCH 21/21] Use Tailwind min-w-32 for tooltip Replace the arbitrary class `min-w-[8rem]` with the standard Tailwind utility `min-w-32` in ChartTooltipContent (src/components/ui/chart.tsx). This aligns the tooltip min-width with the project's spacing scale while preserving the existing visual width (~8rem). --- src/components/ui/chart.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/components/ui/chart.tsx b/src/components/ui/chart.tsx index 8b42f21..c778fb3 100644 --- a/src/components/ui/chart.tsx +++ b/src/components/ui/chart.tsx @@ -173,7 +173,7 @@ function ChartTooltipContent({ return (