diff --git a/CLAUDE.md b/CLAUDE.md index 5feceee..c9461e6 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -88,6 +88,14 @@ Win/macOS: tenbox-manager ──IPC v1──► tenbox-vm-runtime (WHVP / HVF) - **LLM proxy** exists in two places: `src/daemon/llm_proxy.cpp` (Linux) and `src/manager/llm_proxy.cpp` (Windows); change both when the protocol changes. - **RemoteSession** is single-instance per VM. Read `remote_webrtc.cpp`'s `force` takeover path before adding DataChannels. - **macOS Caps Lock forwarding**: send Caps Lock as a tap (`down` then `up`) on each `flagsChanged` event; AppKit exposes it as a toggle state, but the guest input stack needs a full key press for every switch. +- **Agent toolbox**: macOS and Windows desktop managers expose the Agent toolbox without image/rootfs changes. Guest-side toolbox logic lives in `src/agent_tools/guest/agent_tools.sh`; managers copy it into a runtime-only shared folder and execute it with qemu-guest-agent `guest-exec` as user `tenbox`. Keep console-marker paths as fallback only, throttle bulk console input, and never persist temporary Agent share tags into VM manifests. +- **Agent profile and backups**: keep the gzip package format in `docs/agent-profile.md`, include `export_scope`, reject cross-Agent imports, merge imports into the existing Agent home, and exclude reinstallable binaries plus volatile logs/caches/runtime locks including nested `node_modules` and browser `Singleton*` locks. Host backups live under the platform TenBox data dir in `AgentBackups//`, use time-based filenames, tolerate GNU tar live-file churn only when the archive exists and stderr reports `file changed as we read it` or `File removed before we read it`, and rotate by the configured retention count. OpenClaw migration source exports should dereference skill symlinks into regular files while keeping import/migration archive validation strict. 
+- **Agent toolbox tests**: keep guest script archive-safety, cross-Agent import rejection, import rollback, and OpenClaw migration preflight coverage in `tests/test_agent_tools_guest.sh`; run it through CTest after changing `src/agent_tools/guest/agent_tools.sh`. +- **Agent scheduled backups**: store per-VM/per-Agent schedules in `settings.json` under `agent_backups.schedules`; only run them when the VM is running and guest execution is connected, and surface the last automatic backup attempt in the UI. +- **Agent health and repair**: health, restart, reset, and diagnostics run through guest-exec as user `tenbox`; fail instead of falling back to root if user switching fails, and enforce guest-side timeouts for long commands. Destructive or repair actions must create a host-managed backup first, patch only the necessary config, confirm with the user, and avoid full guest `/tmp` extraction that can exhaust small images. +- **OpenClaw to Hermes migration**: use official `hermes claw migrate` with a separate dry run; pass `--overwrite` to the `claw migrate` subcommand, pass `--migrate-secrets` and `--workspace-target`, map UI conflict choices only to `--skill-conflict`, and never use unsupported Hermes global `--overwrite`. Extract the OpenClaw source beside the shared input package instead of guest `/tmp` so small target disks are not exhausted. Treat `Refusing to apply` as failure, save dry-run/final reports beside Hermes backups, restore TenBox model proxy settings after migration, and only copy compatible Feishu/WeCom env settings best-effort. +- **Agent UI responsiveness**: Agent tool UI defaults to English and switches to Chinese only when the current system language is Chinese. Put destructive/low-frequency actions behind confirmation, run guest-exec and shared-folder IPC off the UI thread, and show compact progress/results while writing full logs/reports to files. 
+- **macOS app signing**: the app entitlement includes `com.apple.security.cs.disable-library-validation` so the hardened-runtime app can load the bundled Sparkle framework. - **Static build** (`TENBOX_STATIC_FFMPEG=ON`) requires `/opt/tenbox-deps` (only present inside the CI/packaging container). Dev builds use system shared libs — keep `ON` off by default. - **Release**: `docs/release.md` — VERSION bump → commit → push → tag → push tag. Always push commit before tag. diff --git a/CMakeLists.txt b/CMakeLists.txt index a6db10a..c92bc31 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -167,4 +167,5 @@ endif() # macOS manager is built separately via Xcode/SwiftPM (src/manager-macos) +enable_testing() add_subdirectory(tests) diff --git a/docs/agent-profile.md b/docs/agent-profile.md new file mode 100644 index 0000000..92fc752 --- /dev/null +++ b/docs/agent-profile.md @@ -0,0 +1,131 @@ +# Agent Data Tools + +TenBox.app on macOS and `tenbox-manager.exe` on Windows provide Agent data +export/import, backup/restore, migration, and health actions without requiring +Hermes/OpenClaw images to preinstall TenBox-specific scripts. + +The desktop manager creates a temporary shared folder, then sends a short shell +command through qemu-guest-agent `guest-exec`. The command uses standard guest +tools such as `tar`, `gzip`, `systemctl`, `curl`, and `journalctl`. On Windows, +the same flow is exposed through the Agent急救箱 dialog and host files are stored +under `%LOCALAPPDATA%\TenBox`. 
+ +## Profile package + +The exported package is a gzip tar archive: + +```text +<agent>-<timestamp>-profile.tar.gz +├── manifest.json +└── files.tar.gz +``` + +`manifest.json` contains: + +- `format`: `tenbox-agent-profile` +- `format_version`: `2` +- `agent_type`: `hermes` or `openclaw` +- `export_scope`: `migration` or `backup` +- `archive`: `files.tar.gz` + +`files.tar.gz` contains the Agent data directory relative to the guest home: + +- Hermes: `.hermes` +- OpenClaw: `.openclaw` + +Always excluded paths: + +- Hermes: `.hermes/logs`, `.hermes/image_cache`, `.hermes/audio_cache`, + `.hermes/cache`, `.hermes/hermes-agent`, `.hermes/bin`, `.hermes/gateway.pid`, + `.hermes/gateway.lock` +- OpenClaw: `.openclaw/cache`, `.openclaw/.cache`, `.openclaw/workspace/.cache`, + `.openclaw/logs` + +Migration exports keep secrets, identity, session state, and config files so a +profile can move with the user's full Agent state. Only volatile logs, caches, +runtime lock files, and reinstallable binaries are skipped. + +Import rejects packages whose `agent_type` does not match the selected Agent. +Before replacing existing data, it renames the current directory to +`*.before-import-YYYYMMDD-HHMMSS`. + +## Backups + +Manual backups are created by TenBox.app in: + +```text +~/Library/Application Support/TenBox/AgentBackups/<vm-id>/<agent>/ +``` + +On Windows the equivalent directory is: + +```text +%LOCALAPPDATA%\TenBox\AgentBackups\<vm-id>\<agent>\ +``` + +Backups use the same profile package format. Retention is configurable per VM +and Agent; the default keeps the newest seven packages. Restore uses the package +selected in the backup list for the selected VM and Agent. + +Host-managed backups use `export_scope: backup` and keep restorable user state +except volatile logs, caches, runtime lock files, and reinstallable binaries. +They are intended for recovery on the same host, not for sharing. 
+ +## Health actions + +TenBox.app can run these actions while the VM is running: + +- health status +- restart Agent +- reset Agent config +- export diagnostics + +Restart and reset create a backup first, using the same host-managed backup +directory. Diagnostics are exported to the host backup directory through the +temporary shared folder. Both desktop managers also support per-VM/per-Agent +scheduled backups persisted in `settings.json` as `agent_backups.schedules`. + +## OpenClaw to Hermes migration + +When both source and target VMs are running, TenBox.app can migrate OpenClaw +data into a Hermes VM without image-specific helper scripts: + +1. Create a host-managed Hermes backup for the target VM. +2. Mount one runtime-only host shared folder into both VMs. +3. Export the source VM's `~/.openclaw` into that shared folder with full user + state, including secrets, identity, browser profile, and OpenClaw config. +4. Extract it inside the Hermes VM and run the official Hermes CLI: + + ```sh + hermes claw migrate --dry-run --source <shared-folder>/.openclaw --preset full --migrate-secrets --overwrite + hermes claw migrate --source <shared-folder>/.openclaw --preset full --migrate-secrets --overwrite --skill-conflict skip --yes + ``` + +The migration deliberately uses the `full` preset with `--migrate-secrets` so +Hermes can import every compatible secret and file category its official +OpenClaw migration flow supports. + +TenBox always passes the `claw migrate` subcommand's `--overwrite` flag for +migration; the Hermes global `--overwrite` flag is unsupported and is never +used. The subcommand flag is required for target-level conflicts such as the +existing Hermes soul or model config; the UI conflict strategy controls only +imported skills via `--skill-conflict`. + +After the Hermes CLI succeeds, TenBox reads `.openclaw/openclaw.json` and maps +compatible channel settings into the Hermes environment file: + +- Feishu: `appId`, `appSecret`, `domain`, `connectionMode`, `groupPolicy`, and + optional allowed users become `FEISHU_*` values. 
+- WeCom: `botId`, `secret`, `dmPolicy`, `groupPolicy`, and optional allowed + users become `WECOM_*` values. + +TenBox also best-effort enables `platforms.feishu` and `platforms.wecom` through +the Hermes CLI. It does not copy plugin install state, pairing/device runtime +state, request de-duplication state, or channel adapter internals; users may +still need to check adapter compatibility after migration. + +After the Hermes CLI imports OpenClaw model metadata, TenBox restores the +running VM's local model proxy settings (`http://10.0.2.3/v1`, API key +`tenbox`) for the primary model and auxiliary compression, vision, and session +search models. Imported provider definitions remain available in +`custom_providers`, but TenBox-managed images should keep routing runtime model +traffic through the host proxy. diff --git a/scripts/build-macos.sh b/scripts/build-macos.sh index 82c671e..ca454e1 100755 --- a/scripts/build-macos.sh +++ b/scripts/build-macos.sh @@ -224,6 +224,10 @@ elif [ -f "$METAL_SRC" ]; then echo " -> Copied Shaders.metal (fallback)" fi +mkdir -p "$APP_DIR/Contents/Resources/AgentTools" +cp "$ROOT_DIR/src/agent_tools/guest/agent_tools.sh" "$APP_DIR/Contents/Resources/AgentTools/agent_tools.sh" +echo " -> Copied Agent tools guest script" + # Copy Sparkle framework from SPM build artifacts (universal xcframework) SPM_SCRATCH_REF="$MANAGER_SRC/.build-arm64" SPARKLE_FRAMEWORK=$(find -L "$SPM_SCRATCH_REF/artifacts" -name "Sparkle.framework" -type d 2>/dev/null | head -1) diff --git a/src/agent_tools/guest/agent_tools.sh b/src/agent_tools/guest/agent_tools.sh new file mode 100755 index 0000000..f2642da --- /dev/null +++ b/src/agent_tools/guest/agent_tools.sh @@ -0,0 +1,428 @@ +#!/bin/sh +set -u + +die() { echo "$*" >&2; exit 1; } + +require_linux() { + os="$(uname -s 2>/dev/null || echo unknown)" + [ "$os" = "Linux" ] || die "Agent tools require a Linux guest OS." 
+} + +home_dir() { + h="${HOME:-}" + [ -n "$h" ] || h="$(getent passwd tenbox 2>/dev/null | cut -d: -f6 || true)" + [ -n "$h" ] || h="/home/tenbox" + printf '%s\n' "$h" +} + +agent_rel() { + case "$1" in + hermes) echo ".hermes" ;; + openclaw) echo ".openclaw" ;; + *) die "Unsupported agent: $1" ;; + esac +} + +agent_cmd() { + case "$1" in + hermes) command -v hermes 2>/dev/null || true ;; + openclaw) command -v openclaw 2>/dev/null || true ;; + *) true ;; + esac +} + +agent_service() { + case "$1" in + hermes) echo "hermes-agent" ;; + openclaw) echo "openclaw-agent" ;; + *) die "Unsupported agent: $1" ;; + esac +} + +agent_excludes() { + case "$1" in + hermes) echo ".hermes/logs .hermes/tmp .hermes/cache .hermes/sessions .hermes/node_modules .hermes/node_modules/* */node_modules */node_modules/*" ;; + openclaw) echo ".openclaw/logs .openclaw/tmp .openclaw/cache .openclaw/sessions .openclaw/node_modules .openclaw/node_modules/* */node_modules */node_modules/* .openclaw/browser/*/user-data/Singleton* .openclaw/backup" ;; + *) die "Unsupported agent: $1" ;; + esac +} + +validate_archive() { + archive="$1" rel="$2" + command -v python3 >/dev/null 2>&1 || die "python3 is required to validate the archive." + python3 - "$archive" "$rel" <<'PY' +import os, sys, tarfile +archive, rel = sys.argv[1], sys.argv[2].rstrip('/') + '/' +try: + tar = tarfile.open(archive, 'r:gz') +except Exception as exc: + print(f'Invalid archive: {exc}', file=sys.stderr); sys.exit(2) +with tar: + members = tar.getmembers() + if not members: + print('Archive is empty.', file=sys.stderr); sys.exit(2) + for m in members: + name = m.name; norm = os.path.normpath(name) + if name.startswith('/') or norm == '..' 
or norm.startswith('../'): + print(f'Archive contains unsafe path: {name}', file=sys.stderr); sys.exit(2) + if not (norm == rel.rstrip('/') or norm.startswith(rel)): + print(f'Archive contains unexpected path: {name}', file=sys.stderr); sys.exit(2) + if m.issym() or m.islnk(): + print(f'Archive contains unsupported link: {name}', file=sys.stderr); sys.exit(2) + if not (m.isfile() or m.isdir()): + print(f'Archive contains unsupported entry type: {name}', file=sys.stderr); sys.exit(2) +PY +} + +write_manifest() { + archive="$1" manifest="$2" agent="$3" scope="$4" + command -v python3 >/dev/null 2>&1 || return 0 + python3 - "$archive" "$manifest" "$agent" "$scope" <<'PY' || true +import hashlib, json, os, sys, tarfile, time +archive, manifest, agent, scope = sys.argv[1:5] +sha = hashlib.sha256() +with open(archive, 'rb') as f: + for chunk in iter(lambda: f.read(1024 * 1024), b''): + sha.update(chunk) +try: + with tarfile.open(archive, 'r:gz') as tar: + count = sum(1 for m in tar.getmembers() if m.isfile()) +except Exception: + count = 0 +data = {'agent': agent, 'scope': scope, 'archive': os.path.basename(archive), 'sha256': sha.hexdigest(), 'created_at': int(time.time()), 'file_count': count} +with open(manifest, 'w', encoding='utf-8') as f: + json.dump(data, f, ensure_ascii=False, indent=2) +PY +} + +finalize_file() { + tmp="$1" output="$2" + mv "$tmp" "$output" 2>/dev/null && return 0 + cp "$tmp" "$output" 2>/dev/null && rm -f "$tmp" +} + +create_live_archive() { + archive="$1" + shift + err="${archive}.stderr.$$" + "$@" 2>"$err" + rc=$? + if [ "$rc" -eq 0 ]; then + rm -f "$err" + return 0 + fi + if [ "$rc" -eq 1 ] && [ -s "$archive" ] && awk ' + /file changed as we read it/ || /File removed before we read it/ { seen=1; next } + /^[[:space:]]*$/ { next } + { bad=1 } + END { exit (seen && !bad) ? 
0 : 1 } + ' "$err"; then + cat "$err" >&2 + rm -f "$err" + return 0 + fi + cat "$err" >&2 + rm -f "$err" + return "$rc" +} + +create_openclaw_source_archive() { + archive="$1" home="$2" + command -v python3 >/dev/null 2>&1 || die "python3 is required to export OpenClaw source." + python3 - "$archive" "$home" <<'PY' +import os, stat, sys, tarfile + +archive, home = sys.argv[1:3] +root_name = '.openclaw' +root = os.path.join(home, root_name) +if not os.path.isdir(root): + print(f'OpenClaw profile was not found at {root}.', file=sys.stderr) + sys.exit(1) + +def arcname(path): + return os.path.relpath(path, home).replace(os.sep, '/') + +def excluded(name): + parts = name.split('/') + if len(parts) >= 2 and parts[0] == root_name and parts[1] in {'logs', 'tmp', 'cache', 'sessions', 'backup'}: + return True + if 'node_modules' in parts: + return True + if len(parts) >= 5 and parts[0] == root_name and parts[1] == 'browser' and 'user-data' in parts and os.path.basename(name).startswith('Singleton'): + return True + return False + +def add_file(tar, source, name): + try: + info = tar.gettarinfo(source, arcname=name) + if not info.isfile(): + return + with open(source, 'rb') as f: + tar.addfile(info, f) + except FileNotFoundError: + print(f'tar: {name}: File removed before we read it', file=sys.stderr) + except OSError as exc: + print(f'tar: {name}: {exc}', file=sys.stderr) + +def add_path(tar, source, name, seen): + if excluded(name): + return + try: + st = os.lstat(source) + except FileNotFoundError: + print(f'tar: {name}: File removed before we read it', file=sys.stderr) + return + if stat.S_ISLNK(st.st_mode): + try: + real = os.path.realpath(source) + target_st = os.stat(real) + except FileNotFoundError: + print(f'tar: {name}: File removed before we read it', file=sys.stderr) + return + key = (target_st.st_dev, target_st.st_ino) + if key in seen: + return + if stat.S_ISDIR(target_st.st_mode): + add_dir(tar, real, name, seen | {key}) + elif stat.S_ISREG(target_st.st_mode): 
+ add_file(tar, real, name) + return + if stat.S_ISDIR(st.st_mode): + add_dir(tar, source, name, seen) + elif stat.S_ISREG(st.st_mode): + add_file(tar, source, name) + +def add_dir(tar, source, name, seen): + if excluded(name): + return + try: + info = tar.gettarinfo(source, arcname=name) + info.type = tarfile.DIRTYPE + tar.addfile(info) + entries = sorted(os.listdir(source)) + except FileNotFoundError: + print(f'tar: {name}: File removed before we read it', file=sys.stderr) + return + except OSError as exc: + print(f'tar: {name}: {exc}', file=sys.stderr) + return + for entry in entries: + child = os.path.join(source, entry) + add_path(tar, child, f'{name}/{entry}', seen) + +with tarfile.open(archive, 'w:gz') as tar: + add_path(tar, root, root_name, set()) +PY +} + +export_profile() { + agent="$1" output="$2" scope="${3:-backup}" + home="$(home_dir)" rel="$(agent_rel "$agent")" src="$home/$rel" + [ -d "$src" ] || die "$agent profile was not found at $src." + work="$(dirname "$output")/.tenbox-profile-work.$$" + tmp="${output}.tmp.$$" + rm -rf "$work" "$tmp" + mkdir -p "$work" || die "Failed to create profile work directory." + exclude_args="" + for path in $(agent_excludes "$agent"); do exclude_args="$exclude_args --exclude=$path"; done + cat > "$work/manifest.json" </dev/null 2>&1 || { rm -rf "$work"; die "python3 is required to validate the archive."; } + pkg_agent="$(python3 - "$work/manifest.json" <<'PY' 2>/dev/null || true +import json, sys +with open(sys.argv[1], encoding='utf-8') as f: + print(json.load(f).get('agent_type', '')) +PY +)" + [ "$pkg_agent" = "$agent" ] || { rm -rf "$work"; die "Import package belongs to $pkg_agent, not $agent."; } + validate_archive "$work/files.tar.gz" "$rel" || { rm -rf "$work"; die "Invalid profile archive."; } + backup="" + if [ -e "$target" ]; then + backup="$target.before-import-$(date +%Y%m%d-%H%M%S)" + mv "$target" "$backup" || { rm -rf "$work"; die "Failed to preserve existing profile."; } + fi + if ! 
(cd "$home" && tar -xzf "$work/files.tar.gz"); then + rm -rf "$target" + if [ -n "$backup" ] && [ -e "$backup" ]; then mv "$backup" "$target" || true; fi + rm -rf "$work" + die "Failed to import $agent profile." + fi + rm -rf "$work" + echo "Imported $agent profile from $input." +} + +export_openclaw_source() { + output="$1" + home="$(home_dir)" + src="$home/.openclaw" + [ -d "$src" ] || die "OpenClaw profile was not found at $src." + tmp="${output}.tmp.$$" + rm -f "$tmp" + create_openclaw_source_archive "$tmp" "$home" || { rm -f "$tmp"; die "Failed to export OpenClaw source."; } + validate_archive "$tmp" ".openclaw" || { rm -f "$tmp"; die "Invalid OpenClaw source archive."; } + finalize_file "$tmp" "$output" || { rm -f "$tmp"; die "Failed to finalize OpenClaw source export."; } + echo "Exported OpenClaw source to $output." +} + +health_status() { + agent="$1" cmd="$(agent_cmd "$agent")" service="$(agent_service "$agent")" + echo "Agent: $agent" + [ -n "$cmd" ] && { echo "Command: $cmd"; "$cmd" --version 2>&1 | head -n 1 || true; } || echo "Command: not found" + if command -v systemctl >/dev/null 2>&1; then + state="$(systemctl --user is-active "$service" 2>/dev/null || true)" + [ -n "$state" ] || state="$(systemctl is-active "$service" 2>/dev/null || true)" + [ -n "$state" ] || state="unknown" + echo "Service: $service ($state)" + else + echo "Service: systemctl not available" + fi + config_dir="$(home_dir)/$(agent_rel "$agent")" + [ -d "$config_dir" ] && echo "Config: $config_dir" || echo "Config: missing ($config_dir)" +} + +test_model() { + agent="$1" + curl -fsS --max-time 5 http://10.0.2.3/v1/models >/dev/null 2>&1 && echo "Model proxy is available for $agent." || die "Model proxy is unavailable for $agent." 
+} + +restart_agent() { + agent="$1" service="$(agent_service "$agent")" + if command -v systemctl >/dev/null 2>&1; then + systemctl --user restart "$service" 2>/dev/null && { echo "Restarted user service $service."; return 0; } + systemctl restart "$service" 2>/dev/null && { echo "Restarted system service $service."; return 0; } + fi + echo "No restartable service was found for $agent." +} + +reset_config() { + agent="$1" home="$(home_dir)" rel="$(agent_rel "$agent")" target="$home/$rel" + [ -e "$target" ] || { echo "$agent config is already absent."; return 0; } + moved="$target.reset-$(date +%Y%m%d-%H%M%S)" + mv "$target" "$moved" || die "Failed to move $target." + echo "Moved $target to $moved." +} + +diagnostics() { + agent="$1" output_dir="$2" + mkdir -p "$output_dir" || die "Failed to create diagnostics directory." + { echo "Agent: $agent"; echo "Generated at: $(date -Iseconds 2>/dev/null || date)"; echo; health_status "$agent"; echo; ps -ef 2>/dev/null | grep -E '(hermes|openclaw)' | grep -v grep || true; echo; df -h 2>/dev/null || true; } > "$output_dir/diagnostics.txt" + if command -v journalctl >/dev/null 2>&1; then + service="$(agent_service "$agent")" + journalctl --user -u "$service" -n 200 --no-pager > "$output_dir/journal-user.log" 2>/dev/null || true + journalctl -u "$service" -n 200 --no-pager > "$output_dir/journal-system.log" 2>/dev/null || true + fi + echo "Diagnostics were written to $output_dir." 
+} + +restore_tenbox_model_config() { + hermes_cmd="$(agent_cmd hermes)"; [ -n "$hermes_cmd" ] || return 0 + set +e + "$hermes_cmd" config set models.tenbox-default.provider openai >/dev/null 2>&1 + "$hermes_cmd" config set models.tenbox-default.baseURL http://127.0.0.1:7192/v1 >/dev/null 2>&1 + "$hermes_cmd" config set models.tenbox-default.apiKey tenbox >/dev/null 2>&1 + "$hermes_cmd" config set models.tenbox-default.model gpt-4o >/dev/null 2>&1 + "$hermes_cmd" config set defaultModel tenbox-default >/dev/null 2>&1 + "$hermes_cmd" config set model tenbox-default >/dev/null 2>&1 + set -e +} + +configure_channels() { + source_root="$1" home="$(home_dir)" + command -v python3 >/dev/null 2>&1 || return 0 + python3 - "$source_root" "$home/.hermes" <<'PY' || true +import json, os, shutil, sys +source, hermes = sys.argv[1], sys.argv[2] +openclaw = os.path.join(source, '.openclaw') +os.makedirs(hermes, exist_ok=True) +for name in ('feishu', 'lark', 'wechat', 'wecom'): + src = os.path.join(openclaw, name) + dst = os.path.join(hermes, name) + if os.path.isdir(src) and not os.path.exists(dst): shutil.copytree(src, dst) +src_settings = os.path.join(openclaw, 'settings.json') +dst_settings = os.path.join(hermes, 'settings.json') +if not os.path.exists(src_settings): sys.exit(0) +try: source_data = json.load(open(src_settings, encoding='utf-8')) +except Exception: sys.exit(0) +try: target_data = json.load(open(dst_settings, encoding='utf-8')) +except Exception: target_data = {} +for key in ('mcpServers', 'hooks', 'statusLine', 'permissions'): + if key in source_data and key not in target_data: target_data[key] = source_data[key] +json.dump(target_data, open(dst_settings, 'w', encoding='utf-8'), ensure_ascii=False, indent=2) +PY +} + +migrate_openclaw() { + input="$1" report="$2" skill_conflict="$3" workspace_target="$4" mode="$5" + validate_archive "$input" ".openclaw" || die "Invalid OpenClaw source archive." 
+ tmp="$(dirname "$input")/.tenbox-openclaw-migration.$$"; rm -rf "$tmp"; mkdir -p "$tmp" || die "Failed to create temporary directory." + (cd "$tmp" && tar -xmzf "$input") || { rm -rf "$tmp"; die "Failed to unpack OpenClaw source."; } + source_dir="$tmp/.openclaw" + [ -d "$source_dir" ] || { rm -rf "$tmp"; die "Migration package is missing .openclaw."; } + { echo "# OpenClaw to Hermes migration"; echo; echo "- Mode: $mode"; echo "- Generated at: $(date -Iseconds 2>/dev/null || date)"; echo; } > "$report" + hermes_cmd="$(agent_cmd hermes)" + [ -n "$hermes_cmd" ] || die "Target VM is missing the Hermes command." + if [ "$mode" = "dry-run" ]; then + mode_arg="--dry-run" + fail_message="Hermes migration dry run failed. See $report." + else + mode_arg="--yes" + fail_message="Hermes migration failed. See $report." + fi + if [ -n "$workspace_target" ]; then + "$hermes_cmd" claw migrate --overwrite --source "$source_dir" --skill-conflict "$skill_conflict" --workspace-target "$workspace_target" --migrate-secrets "$mode_arg" >> "$report" 2>&1 || die "$fail_message" + else + "$hermes_cmd" claw migrate --overwrite --source "$source_dir" --skill-conflict "$skill_conflict" --migrate-secrets "$mode_arg" >> "$report" 2>&1 || die "$fail_message" + fi + if grep -F "Refusing to apply" "$report" >/dev/null 2>&1; then + rm -rf "$tmp" + die "$fail_message" + fi + if [ "$mode" != "dry-run" ]; then configure_channels "$tmp"; restore_tenbox_model_config; echo "Migration completed." >> "$report"; fi + echo "Migration report was written to $report." 
+ rm -rf "$tmp" +} + +main() { + require_linux + cmd="${1:-}"; [ -n "$cmd" ] || die "Missing command."; shift + case "$cmd" in + export-profile) [ "$#" -ge 2 ] || die "Usage: export-profile AGENT OUTPUT [SCOPE]"; export_profile "$1" "$2" "${3:-backup}" ;; + import-profile) [ "$#" -eq 2 ] || die "Usage: import-profile AGENT INPUT"; import_profile "$1" "$2" ;; + health) [ "$#" -eq 1 ] || die "Usage: health AGENT"; health_status "$1" ;; + test-model) [ "$#" -eq 1 ] || die "Usage: test-model AGENT"; test_model "$1" ;; + restart) [ "$#" -eq 1 ] || die "Usage: restart AGENT"; restart_agent "$1" ;; + reset-config) [ "$#" -eq 1 ] || die "Usage: reset-config AGENT"; reset_config "$1" ;; + diagnostics) [ "$#" -eq 2 ] || die "Usage: diagnostics AGENT OUTPUT_DIR"; diagnostics "$1" "$2" ;; + export-openclaw-source) [ "$#" -eq 1 ] || die "Usage: export-openclaw-source OUTPUT"; export_openclaw_source "$1" ;; + migrate-openclaw-dry-run) [ "$#" -eq 4 ] || die "Usage: migrate-openclaw-dry-run INPUT REPORT SKILL_CONFLICT WORKSPACE_TARGET"; migrate_openclaw "$1" "$2" "$3" "$4" dry-run ;; + migrate-openclaw-apply) [ "$#" -eq 4 ] || die "Usage: migrate-openclaw-apply INPUT REPORT SKILL_CONFLICT WORKSPACE_TARGET"; migrate_openclaw "$1" "$2" "$3" "$4" apply ;; + *) die "Unknown command: $cmd" ;; + esac +} + +main "$@" diff --git a/src/core/guest_agent/guest_agent_handler.cpp b/src/core/guest_agent/guest_agent_handler.cpp index 3f1073b..406a926 100644 --- a/src/core/guest_agent/guest_agent_handler.cpp +++ b/src/core/guest_agent/guest_agent_handler.cpp @@ -1,8 +1,13 @@ #include "core/guest_agent/guest_agent_handler.h" #include "core/device/virtio/virtio_serial.h" #include "core/vmm/types.h" +#include #include +#include #include +#include +#include +#include #include #include @@ -25,6 +30,19 @@ static std::string JsonEscape(const std::string& s) { return out; } +static std::string ShellQuote(const std::string& s) { + std::string out = "'"; + for (char c : s) { + if (c == '\'') { + out += 
"'\\''"; + } else { + out += c; + } + } + out += "'"; + return out; +} + static int64_t GenerateSyncId() { std::random_device rd; std::mt19937_64 gen(rd()); @@ -47,8 +65,114 @@ static int64_t JsonGetInt(const std::string& json, const std::string& key) { return std::strtoll(json.c_str() + pos, nullptr, 10); } +static std::optional JsonTryGetInt(const std::string& json, const std::string& key) { + std::string needle = "\"" + key + "\":"; + auto pos = json.find(needle); + if (pos == std::string::npos) return std::nullopt; + pos += needle.size(); + while (pos < json.size() && json[pos] == ' ') ++pos; + char* end = nullptr; + int64_t value = std::strtoll(json.c_str() + pos, &end, 10); + if (end == json.c_str() + pos) return std::nullopt; + return value; +} + +static std::optional JsonTryGetBool(const std::string& json, const std::string& key) { + std::string needle = "\"" + key + "\":"; + auto pos = json.find(needle); + if (pos == std::string::npos) return std::nullopt; + pos += needle.size(); + while (pos < json.size() && json[pos] == ' ') ++pos; + if (json.compare(pos, 4, "true") == 0) return true; + if (json.compare(pos, 5, "false") == 0) return false; + return std::nullopt; +} + +static std::string JsonUnescape(const std::string& s) { + std::string out; + out.reserve(s.size()); + bool escaped = false; + for (char c : s) { + if (!escaped) { + if (c == '\\') { + escaped = true; + } else { + out.push_back(c); + } + continue; + } + + switch (c) { + case '"': out.push_back('"'); break; + case '\\': out.push_back('\\'); break; + case '/': out.push_back('/'); break; + case 'b': out.push_back('\b'); break; + case 'f': out.push_back('\f'); break; + case 'n': out.push_back('\n'); break; + case 'r': out.push_back('\r'); break; + case 't': out.push_back('\t'); break; + default: out.push_back(c); break; + } + escaped = false; + } + return out; +} + +static std::optional JsonTryGetString(const std::string& json, const std::string& key) { + std::string needle = "\"" + key + "\":"; 
+ auto pos = json.find(needle); + if (pos == std::string::npos) return std::nullopt; + pos += needle.size(); + while (pos < json.size() && json[pos] == ' ') ++pos; + if (pos >= json.size() || json[pos] != '"') return std::nullopt; + ++pos; + + std::string raw; + bool escaped = false; + for (; pos < json.size(); ++pos) { + char c = json[pos]; + if (!escaped && c == '"') { + return JsonUnescape(raw); + } + if (!escaped && c == '\\') { + escaped = true; + raw.push_back(c); + continue; + } + escaped = false; + raw.push_back(c); + } + return std::nullopt; +} + GuestAgentHandler::GuestAgentHandler() = default; -GuestAgentHandler::~GuestAgentHandler() = default; +GuestAgentHandler::~GuestAgentHandler() { + stopping_ = true; + + std::vector callbacks; + { + std::lock_guard lock(mutex_); + callbacks.reserve(pending_responses_.size()); + for (auto& [_, cb] : pending_responses_) { + callbacks.push_back(std::move(cb)); + } + pending_responses_.clear(); + } + for (auto& cb : callbacks) { + if (cb) cb(R"({"error":{"desc":"guest agent stopped"}})"); + } + + std::vector threads; + { + std::lock_guard lock(exec_threads_mutex_); + threads.swap(exec_threads_); + } + for (auto& thread : threads) { + if (thread.joinable()) { + thread.join(); + } + } +} void GuestAgentHandler::SetSerialDevice(VirtioSerialDevice* device, uint32_t port_id) { serial_device_ = device; @@ -68,11 +192,20 @@ void GuestAgentHandler::OnPortOpen(bool opened) { } else { bool was_connected = connected_.exchange(false); ConnectedCallback cb; + std::vector response_callbacks; { std::lock_guard lock(mutex_); cb = connected_callback_; recv_buffer_.clear(); sync_pending_ = false; + response_callbacks.reserve(pending_responses_.size()); + for (auto& [_, response_cb] : pending_responses_) { + response_callbacks.push_back(std::move(response_cb)); + } + pending_responses_.clear(); + } + for (auto& response_cb : response_callbacks) { + if (response_cb) response_cb(R"({"error":{"desc":"guest agent disconnected"}})"); } if 
(was_connected && cb) { cb(false); @@ -143,6 +276,7 @@ void GuestAgentHandler::ProcessLine(const std::string& line) { LOG_DEBUG("GuestAgent: recv: %s", line.c_str()); ConnectedCallback cb_to_fire; + ResponseCallback response_cb; { std::lock_guard lock(mutex_); @@ -160,6 +294,15 @@ void GuestAgentHandler::ProcessLine(const std::string& line) { } } + auto id = JsonTryGetInt(line, "id"); + if (id && *id > 0) { + auto it = pending_responses_.find(static_cast(*id)); + if (it != pending_responses_.end()) { + response_cb = std::move(it->second); + pending_responses_.erase(it); + } + } + if (JsonHasKey(line, "error")) { if (sync_pending_) { LOG_DEBUG("GuestAgent: error during sync (expected): %s", line.c_str()); @@ -172,6 +315,9 @@ void GuestAgentHandler::ProcessLine(const std::string& line) { if (cb_to_fire) { cb_to_fire(true); } + if (response_cb) { + response_cb(line); + } } void GuestAgentHandler::SendRaw(const std::string& json_line) { @@ -182,35 +328,198 @@ void GuestAgentHandler::SendRaw(const std::string& json_line) { } void GuestAgentHandler::SendCommand(const std::string& command) { + SendCommandRequest(command, "", nullptr); +} + +void GuestAgentHandler::SendCommand(const std::string& command, + const std::string& arguments_json) { + SendCommandRequest(command, arguments_json, nullptr); +} + +uint64_t GuestAgentHandler::SendCommandRequest(const std::string& command, + const std::string& arguments_json, + ResponseCallback callback) { if (!connected_.load()) { LOG_WARN("GuestAgent: not connected, cannot send %s", command.c_str()); - return; + return 0; + } + + uint64_t id = 0; + { + std::lock_guard lock(mutex_); + id = next_id_++; + if (callback) { + pending_responses_[id] = std::move(callback); + } } - uint64_t id = next_id_++; std::ostringstream oss; oss << R"({"execute":")" << JsonEscape(command) - << R"(","id":)" << id << "}\n"; + << R"(")"; + if (!arguments_json.empty()) { + oss << R"(,"arguments":)" << arguments_json; + } + oss << R"(,"id":)" << id << "}\n"; 
LOG_INFO("GuestAgent: sending %s (id=%" PRIu64 ")", command.c_str(), id); SendRaw(oss.str()); + return id; } -void GuestAgentHandler::SendCommand(const std::string& command, - const std::string& arguments_json) { +bool GuestAgentHandler::SendCommandSync(const std::string& command, + const std::string& arguments_json, + std::chrono::milliseconds timeout, + std::string* response, + std::string* error) { + struct SyncState { + std::mutex mutex; + std::condition_variable cv; + bool done = false; + std::string response; + }; + + auto state = std::make_shared(); + uint64_t request_id = SendCommandRequest(command, arguments_json, [state](const std::string& line) { + { + std::lock_guard lock(state->mutex); + state->response = line; + state->done = true; + } + state->cv.notify_all(); + }); + if (request_id == 0) { + if (error) *error = "guest agent not connected"; + return false; + } + + auto deadline = std::chrono::steady_clock::now() + timeout; + std::unique_lock lock(state->mutex); + while (!state->done && !stopping_.load()) { + if (state->cv.wait_until(lock, deadline) == std::cv_status::timeout) { + break; + } + } + + if (!state->done) { + std::lock_guard pending_lock(mutex_); + pending_responses_.erase(request_id); + if (error) *error = stopping_.load() ? 
"guest agent stopped" : "guest agent command timed out"; + return false; + } + + if (response) *response = state->response; + return true; +} + +bool GuestAgentHandler::RunShellCommand(const std::string& command, + const std::string& user, + std::chrono::milliseconds timeout, + ExecCallback callback) { if (!connected_.load()) { - LOG_WARN("GuestAgent: not connected, cannot send %s", command.c_str()); + return false; + } + std::lock_guard lock(exec_threads_mutex_); + exec_threads_.emplace_back( + [this, command, user, timeout, callback = std::move(callback)]() mutable { + RunShellCommandWorker(command, user, timeout, std::move(callback)); + }); + return true; +} + +void GuestAgentHandler::RunShellCommandWorker(const std::string& command, + const std::string& user, + std::chrono::milliseconds timeout, + ExecCallback callback) { + ExecResult result; + auto finish = [&](ExecResult r) { + if (callback) callback(std::move(r)); + }; + + std::string exec_command = command; + if (!user.empty()) { + const std::string quoted_user = ShellQuote(user); + const std::string quoted_command = ShellQuote(command); + const std::string missing_user = ShellQuote("guest user not found: " + user); + const std::string switch_error = ShellQuote("cannot switch guest user: " + user); + exec_command = + "if ! 
id " + quoted_user + " >/dev/null 2>&1; then printf '%s\\n' " + missing_user + " >&2; exit 126; fi; " + "if command -v runuser >/dev/null 2>&1; then exec runuser -l " + quoted_user + " -c " + quoted_command + "; fi; " + "if command -v su >/dev/null 2>&1; then exec su -s /bin/sh " + quoted_user + " -c " + quoted_command + "; fi; " + "printf '%s\\n' " + switch_error + " >&2; exit 126"; + } + + const auto timeout_seconds = std::max( + 1, std::chrono::duration_cast(timeout).count()); + const std::string quoted_exec_command = ShellQuote(exec_command); + exec_command = + "if command -v timeout >/dev/null 2>&1; then " + "exec timeout -k 5s " + std::to_string(timeout_seconds) + "s /bin/sh -lc " + quoted_exec_command + "; " + "fi; " + "/bin/sh -lc " + quoted_exec_command + " & __tenbox_child=$!; " + "( sleep " + std::to_string(timeout_seconds) + "; " + "kill -TERM \"$__tenbox_child\" >/dev/null 2>&1 || true; " + "sleep 5; kill -KILL \"$__tenbox_child\" >/dev/null 2>&1 || true ) & __tenbox_watchdog=$!; " + "wait \"$__tenbox_child\"; __tenbox_rc=$?; " + "kill \"$__tenbox_watchdog\" >/dev/null 2>&1 || true; " + "wait \"$__tenbox_watchdog\" 2>/dev/null || true; " + "exit \"$__tenbox_rc\""; + + const std::string args = + R"({"path":"/bin/sh","arg":["-lc",")" + JsonEscape(exec_command) + + R"("],"capture-output":true})"; + + std::string response; + std::string error; + if (!SendCommandSync("guest-exec", args, std::chrono::seconds(10), &response, &error)) { + result.error = error.empty() ? 
"failed to start guest command" : error; + finish(std::move(result)); + return; + } + if (auto desc = JsonTryGetString(response, "desc")) { + result.error = *desc; + finish(std::move(result)); + return; + } + auto pid = JsonTryGetInt(response, "pid"); + if (!pid || *pid <= 0) { + result.error = "guest agent did not return a command pid"; + finish(std::move(result)); return; } - uint64_t id = next_id_++; - std::ostringstream oss; - oss << R"({"execute":")" << JsonEscape(command) - << R"(","arguments":)" << arguments_json - << R"(,"id":)" << id << "}\n"; + auto deadline = std::chrono::steady_clock::now() + timeout; + while (!stopping_.load() && std::chrono::steady_clock::now() < deadline) { + std::string status_response; + std::string status_error; + std::string status_args = R"({"pid":)" + std::to_string(*pid) + "}"; + if (!SendCommandSync("guest-exec-status", status_args, + std::chrono::seconds(10), &status_response, &status_error)) { + result.error = status_error.empty() ? "failed to read guest command status" : status_error; + finish(std::move(result)); + return; + } + if (auto desc = JsonTryGetString(status_response, "desc")) { + result.error = *desc; + finish(std::move(result)); + return; + } - LOG_INFO("GuestAgent: sending %s (id=%" PRIu64 ")", command.c_str(), id); - SendRaw(oss.str()); + auto exited = JsonTryGetBool(status_response, "exited"); + if (exited && *exited) { + result.ok = true; + result.exited = true; + result.exit_code = static_cast(JsonTryGetInt(status_response, "exitcode").value_or(0)); + result.out_data = JsonTryGetString(status_response, "out-data").value_or(""); + result.err_data = JsonTryGetString(status_response, "err-data").value_or(""); + finish(std::move(result)); + return; + } + + std::this_thread::sleep_for(std::chrono::milliseconds(250)); + } + + result.error = stopping_.load() ? 
"guest agent stopped" : "guest command timed out"; + finish(std::move(result)); } void GuestAgentHandler::Shutdown(const std::string& mode) { diff --git a/src/core/guest_agent/guest_agent_handler.h b/src/core/guest_agent/guest_agent_handler.h index b22c644..09e62ca 100644 --- a/src/core/guest_agent/guest_agent_handler.h +++ b/src/core/guest_agent/guest_agent_handler.h @@ -1,10 +1,12 @@ #pragma once #include +#include #include #include #include #include +#include #include #include @@ -13,6 +15,15 @@ class VirtioSerialDevice; class GuestAgentHandler { public: using ConnectedCallback = std::function; + struct ExecResult { + bool ok = false; + bool exited = false; + int exit_code = -1; + std::string out_data; + std::string err_data; + std::string error; + }; + using ExecCallback = std::function; GuestAgentHandler(); ~GuestAgentHandler(); @@ -37,10 +48,30 @@ class GuestAgentHandler { // Sync guest wall clock to host (QGA guest-set-time, nanoseconds since epoch) void SyncTime(); + // Execute a shell command through qemu-guest-agent guest-exec. 
+ bool RunShellCommand(const std::string& command, + const std::string& user, + std::chrono::milliseconds timeout, + ExecCallback callback); + private: + using ResponseCallback = std::function; + void SendCommand(const std::string& command); void SendCommand(const std::string& command, const std::string& arguments_json); + uint64_t SendCommandRequest(const std::string& command, + const std::string& arguments_json, + ResponseCallback callback); + bool SendCommandSync(const std::string& command, + const std::string& arguments_json, + std::chrono::milliseconds timeout, + std::string* response, + std::string* error); + void RunShellCommandWorker(const std::string& command, + const std::string& user, + std::chrono::milliseconds timeout, + ExecCallback callback); void SendRaw(const std::string& json_line); void ProcessLine(const std::string& line); void StartSyncHandshake(); @@ -55,4 +86,9 @@ class GuestAgentHandler { int64_t sync_id_ = 0; uint64_t next_id_ = 1; ConnectedCallback connected_callback_; + std::unordered_map pending_responses_; + + std::atomic stopping_{false}; + std::mutex exec_threads_mutex_; + std::vector exec_threads_; }; diff --git a/src/manager-macos/Bridge/IpcClientWrapper.swift b/src/manager-macos/Bridge/IpcClientWrapper.swift index 64839e0..ab69b55 100644 --- a/src/manager-macos/Bridge/IpcClientWrapper.swift +++ b/src/manager-macos/Bridge/IpcClientWrapper.swift @@ -3,6 +3,7 @@ import TenBoxBridge class IpcClientWrapper: ObservableObject { private let client = TBIpcClient() + private let sendQueue = DispatchQueue(label: "tenbox.ipc.send", qos: .userInitiated) @Published var isConnected = false // Display: (pixelBytes, pixelLength, dirtyW, dirtyH, stride, resourceW, resourceH, dirtyX, dirtyY) @@ -25,6 +26,7 @@ class IpcClientWrapper: ObservableObject { // VM state var onRuntimeState: ((String) -> Void)? var onGuestAgentState: ((Bool) -> Void)? + var onGuestExecResult: ((UInt64, Bool, Int32, String, String, String?) -> Void)? 
// Host-forward errors (host ports that failed to bind) var onHostForwardError: (([String]) -> Void)? @@ -73,6 +75,20 @@ class IpcClientWrapper: ObservableObject { _ = client.sendSyncTimeCommand() } + func sendGuestExec(command: String, user: String, requestId: UInt64, timeoutMs: UInt32) -> Bool { + client.sendGuestExecCommand(command, user: user, requestId: requestId, timeoutMs: timeoutMs) + } + + func sendGuestExecAsync(command: String, user: String, requestId: UInt64, timeoutMs: UInt32, + completion: @escaping (Bool) -> Void) { + sendQueue.async { [client] in + let sent = client.sendGuestExecCommand(command, user: user, requestId: requestId, timeoutMs: timeoutMs) + DispatchQueue.main.async { + completion(sent) + } + } + } + func sendKey(code: UInt16, pressed: Bool) { client.sendKeyEvent(code, pressed: pressed) } @@ -110,7 +126,9 @@ class IpcClientWrapper: ObservableObject { } func sendSharedFoldersUpdate(entries: [String]) { - client.sendSharedFoldersUpdate(entries) + sendQueue.async { [client, entries] in + client.sendSharedFoldersUpdate(entries) + } } func sendNetworkUpdate(hostfwdEntries: [String], guestfwdEntries: [String], netEnabled: Bool) { @@ -152,6 +170,9 @@ class IpcClientWrapper: ObservableObject { guestAgentStateHandler: { [weak self] connected in self?.onGuestAgentState?(connected) }, + guestExecResultHandler: { [weak self] requestId, ok, exitCode, stdoutText, stderrText, error in + self?.onGuestExecResult?(requestId, ok, exitCode, stdoutText, stderrText, error) + }, displayStateHandler: { [weak self] active, w, h in self?.onDisplayState?(active, w, h) }, diff --git a/src/manager-macos/Bridge/Sources/TenBoxIPC.mm b/src/manager-macos/Bridge/Sources/TenBoxIPC.mm index 81aa76c..ff0cb20 100644 --- a/src/manager-macos/Bridge/Sources/TenBoxIPC.mm +++ b/src/manager-macos/Bridge/Sources/TenBoxIPC.mm @@ -50,6 +50,18 @@ return out; } +static NSString* DecodeBase64Utf8(const std::string& value) { + if (value.empty()) return @""; + NSString* b64 = [NSString 
stringWithUTF8String:value.c_str()]; + NSData* data = [[NSData alloc] initWithBase64EncodedString:b64 options:0]; + if (!data) return @""; + NSString* text = [[NSString alloc] initWithData:data encoding:NSUTF8StringEncoding]; + if (!text) { + text = [[NSString alloc] initWithData:data encoding:NSISOLatin1StringEncoding]; + } + return text ?: @""; +} + @implementation TBIpcClient { std::unique_ptr _connection; std::mutex _sendLock; @@ -132,7 +144,8 @@ - (BOOL)sendControlCommand:(NSString *)command { msg.channel = ipc::Channel::kControl; msg.kind = ipc::Kind::kRequest; msg.type = "runtime.command"; - msg.fields["command"] = command.UTF8String; + std::string raw = command.UTF8String; + msg.fields["command_hex"] = HexEncode(raw); std::lock_guard lock(_sendLock); std::string encoded = ipc::Encode(msg); @@ -144,6 +157,25 @@ - (BOOL)sendSyncTimeCommand { return [self sendControlCommand:@"sync-time"]; } +- (BOOL)sendGuestExecCommand:(NSString *)command user:(NSString *)user requestId:(uint64_t)requestId timeoutMs:(uint32_t)timeoutMs { + if (!_connection || !_connection->IsValid()) return NO; + + ipc::Message msg; + msg.channel = ipc::Channel::kControl; + msg.kind = ipc::Kind::kRequest; + msg.type = "runtime.guest_exec"; + msg.request_id = requestId; + std::string raw = command.UTF8String; + msg.fields["command_hex"] = HexEncode(raw); + if (user.length > 0) { + msg.fields["user"] = user.UTF8String; + } + msg.fields["timeout_ms"] = std::to_string(timeoutMs); + + std::lock_guard lock(_sendLock); + return _connection->Send(ipc::Encode(msg)); +} + #pragma mark - Send: Input - (BOOL)sendKeyEvent:(uint16_t)code pressed:(BOOL)pressed { @@ -354,6 +386,7 @@ - (void)startReceiveLoopWithFrameHandler:(void (^)(const void *, size_t, uint32_ clipboardRequestHandler:(void (^)(uint32_t))clipboardRequestHandler runtimeStateHandler:(void (^)(NSString *))runtimeStateHandler guestAgentStateHandler:(void (^)(BOOL))guestAgentStateHandler + guestExecResultHandler:(void (^)(uint64_t, BOOL, 
int32_t, NSString *, NSString *, NSString * _Nullable))guestExecResultHandler displayStateHandler:(void (^)(BOOL, uint32_t, uint32_t))displayStateHandler disconnectHandler:(void (^)(void))disconnectHandler { if (_recvThread.joinable()) { @@ -373,6 +406,7 @@ - (void)startReceiveLoopWithFrameHandler:(void (^)(const void *, size_t, uint32_ typedef void (^ClipReqBlock)(uint32_t); typedef void (^StateBlock)(NSString *); typedef void (^BoolBlock)(BOOL); + typedef void (^GuestExecBlock)(uint64_t, BOOL, int32_t, NSString *, NSString *, NSString * _Nullable); typedef void (^DispStateBlock)(BOOL, uint32_t, uint32_t); typedef void (^VoidBlock)(void); @@ -385,10 +419,11 @@ - (void)startReceiveLoopWithFrameHandler:(void (^)(const void *, size_t, uint32_ ClipReqBlock crH = [clipboardRequestHandler copy]; StateBlock rsH = [runtimeStateHandler copy]; BoolBlock gaH = [guestAgentStateHandler copy]; + GuestExecBlock geH = [guestExecResultHandler copy]; DispStateBlock dsH = [displayStateHandler copy]; VoidBlock dh = [disconnectHandler copy]; - _recvThread = std::thread([self, fh, cuH, ah, coh, cgH, cdH, crH, rsH, gaH, dsH, dh] { + _recvThread = std::thread([self, fh, cuH, ah, coh, cgH, cdH, crH, rsH, gaH, geH, dsH, dh] { // Streaming parser — mirrors the Windows DispatchPipeData approach. // One large read buffer, parse header lines + payloads in-place. 
std::string pending; @@ -409,7 +444,7 @@ - (void)startReceiveLoopWithFrameHandler:(void (^)(const void *, size_t, uint32_ payload_needed = 0; auto& msg = pending_msg; - [self dispatchMsg:msg fh:fh cuH:cuH ah:ah coh:coh cgH:cgH cdH:cdH crH:crH rsH:rsH gaH:gaH dsH:dsH]; + [self dispatchMsg:msg fh:fh cuH:cuH ah:ah coh:coh cgH:cgH cdH:cdH crH:crH rsH:rsH gaH:gaH geH:geH dsH:dsH]; continue; } @@ -432,7 +467,7 @@ - (void)startReceiveLoopWithFrameHandler:(void (^)(const void *, size_t, uint32_ } auto& msg = *decoded; - [self dispatchMsg:msg fh:fh cuH:cuH ah:ah coh:coh cgH:cgH cdH:cdH crH:crH rsH:rsH gaH:gaH dsH:dsH]; + [self dispatchMsg:msg fh:fh cuH:cuH ah:ah coh:coh cgH:cgH cdH:cdH crH:crH rsH:rsH gaH:gaH geH:geH dsH:dsH]; } }; @@ -466,6 +501,7 @@ - (void)dispatchMsg:(ipc::Message&)msg crH:(void (^)(uint32_t))crH rsH:(void (^)(NSString *))rsH gaH:(void (^)(BOOL))gaH + geH:(void (^)(uint64_t, BOOL, int32_t, NSString *, NSString *, NSString * _Nullable))geH dsH:(void (^)(BOOL, uint32_t, uint32_t))dsH { auto getU32 = [&](const char* key) -> uint32_t { @@ -635,6 +671,36 @@ - (void)dispatchMsg:(ipc::Message&)msg IPC_DEBUG_LOG(@"[IPC] << %s guest_agent.state connected=%d", GetTimestamp().c_str(), connected); dispatch_async(dispatch_get_main_queue(), ^{ gaH(connected); }); } + else if (msg.type == "runtime.guest_exec.result") { + BOOL ok = false; + int32_t exitCode = -1; + NSString* outText = @""; + NSString* errText = @""; + NSString* errorText = nil; + + auto okIt = msg.fields.find("ok"); + ok = (okIt != msg.fields.end() && okIt->second == "true"); + auto codeIt = msg.fields.find("exit_code"); + if (codeIt != msg.fields.end()) { + exitCode = static_cast(std::strtol(codeIt->second.c_str(), nullptr, 10)); + } + auto outIt = msg.fields.find("out_b64"); + if (outIt != msg.fields.end()) { + outText = DecodeBase64Utf8(outIt->second); + } + auto errIt = msg.fields.find("err_b64"); + if (errIt != msg.fields.end()) { + errText = DecodeBase64Utf8(errIt->second); + } + auto errorIt = 
msg.fields.find("error"); + if (errorIt != msg.fields.end()) { + errorText = [NSString stringWithUTF8String:errorIt->second.c_str()]; + } + uint64_t reqId = msg.request_id; + dispatch_async(dispatch_get_main_queue(), ^{ + geH(reqId, ok, exitCode, outText, errText, errorText); + }); + } else if (msg.type == "display.state") { auto ai = msg.fields.find("active"); auto wi = msg.fields.find("width"); diff --git a/src/manager-macos/Bridge/VmConfigStore.swift b/src/manager-macos/Bridge/VmConfigStore.swift index eb7afa7..4b39c4b 100644 --- a/src/manager-macos/Bridge/VmConfigStore.swift +++ b/src/manager-macos/Bridge/VmConfigStore.swift @@ -23,6 +23,10 @@ class VmConfigStore { private let decoder = JSONDecoder() + private static func isAgentToolSharedFolderTag(_ tag: String) -> Bool { + tag.hasPrefix("tenbox-agent-ops-") || tag.hasPrefix("tenbox-agent-backups-") + } + // MARK: - Paths func vmDirectory(for vmId: String) -> URL { @@ -47,9 +51,24 @@ class VmConfigStore { config.kernelPath = resolve(config.kernelPath) config.initrdPath = resolve(config.initrdPath) config.diskPath = resolve(config.diskPath) + config.sharedFolders.removeAll { Self.isAgentToolSharedFolderTag($0.tag) } return config } + func purgeAgentToolSharedFolders() { + let fm = FileManager.default + guard let items = try? fm.contentsOfDirectory(atPath: Self.vmsDirectory.path) else { return } + for item in items { + let url = configURL(for: item) + guard let data = try? Data(contentsOf: url), + var config = try? decoder.decode(VmConfig.self, from: data) else { continue } + let oldCount = config.sharedFolders.count + config.sharedFolders.removeAll { Self.isAgentToolSharedFolderTag($0.tag) } + guard config.sharedFolders.count != oldCount else { continue } + _ = writeConfig(vmId: item, config: config) + } + } + @discardableResult func writeConfig(vmId: String, config: VmConfig) -> Bool { guard let data = try? 
encoder.encode(config) else { return false } diff --git a/src/manager-macos/Bridge/include/TenBoxIPC.h b/src/manager-macos/Bridge/include/TenBoxIPC.h index 8efd703..c9f56f4 100644 --- a/src/manager-macos/Bridge/include/TenBoxIPC.h +++ b/src/manager-macos/Bridge/include/TenBoxIPC.h @@ -21,6 +21,9 @@ NS_ASSUME_NONNULL_BEGIN /// Push host wall time to guest (qemu-ga guest-set-time) when guest agent is connected. - (BOOL)sendSyncTimeCommand; +/// Execute a shell command through qemu-guest-agent guest-exec. +- (BOOL)sendGuestExecCommand:(NSString *)command user:(NSString *)user requestId:(uint64_t)requestId timeoutMs:(uint32_t)timeoutMs; + // Input events (forwarded to virtio-input) - (BOOL)sendKeyEvent:(uint16_t)code pressed:(BOOL)pressed; - (BOOL)sendPointerAbsolute:(int32_t)x y:(int32_t)y buttons:(uint32_t)buttons; @@ -62,6 +65,7 @@ NS_ASSUME_NONNULL_BEGIN clipboardRequestHandler:(void (^)(uint32_t dataType))clipboardRequestHandler runtimeStateHandler:(void (^)(NSString *state))runtimeStateHandler guestAgentStateHandler:(void (^)(BOOL connected))guestAgentStateHandler + guestExecResultHandler:(void (^)(uint64_t requestId, BOOL ok, int32_t exitCode, NSString *stdoutText, NSString *stderrText, NSString * _Nullable error))guestExecResultHandler displayStateHandler:(void (^)(BOOL active, uint32_t width, uint32_t height))displayStateHandler disconnectHandler:(void (^)(void))disconnectHandler; - (void)stopReceiveLoop; diff --git a/src/manager-macos/Package.swift b/src/manager-macos/Package.swift index fd1a055..0d5c623 100644 --- a/src/manager-macos/Package.swift +++ b/src/manager-macos/Package.swift @@ -45,7 +45,9 @@ let package = Package( "Bridge/VmProcessManager.swift", "Services/ImageSourceService.swift", "Services/LlmProxyService.swift", + "Services/AgentToolsService.swift", "Views/LlmProxyView.swift", + "Views/AgentToolsView.swift", ], resources: [ .copy("Resources/icon.png"), diff --git a/src/manager-macos/Resources/TenBox.entitlements 
b/src/manager-macos/Resources/TenBox.entitlements index 6f6d171..d267311 100644 --- a/src/manager-macos/Resources/TenBox.entitlements +++ b/src/manager-macos/Resources/TenBox.entitlements @@ -4,6 +4,8 @@ com.apple.security.hypervisor + com.apple.security.cs.disable-library-validation + com.apple.security.app-sandbox diff --git a/src/manager-macos/Services/AgentToolsService.swift b/src/manager-macos/Services/AgentToolsService.swift new file mode 100644 index 0000000..9a691d5 --- /dev/null +++ b/src/manager-macos/Services/AgentToolsService.swift @@ -0,0 +1,707 @@ +import Foundation + +private enum AgentLocale { + static var isChinese: Bool { + Locale.preferredLanguages.first?.lowercased().hasPrefix("zh") == true + } +} + +func AgentText(_ english: String, _ chinese: String) -> String { + AgentLocale.isChinese ? chinese : english +} + +enum AgentKind: String, CaseIterable, Identifiable { + case hermes + case openclaw + + var id: String { rawValue } + + var displayName: String { + switch self { + case .hermes: return "Hermes" + case .openclaw: return "OpenClaw" + } + } +} + +struct ConsoleCommandResult { + let exitCode: Int32 + let output: String +} + +struct AgentToolResult { + let message: String + let output: String +} + +enum AgentSkillConflictStrategy: String, CaseIterable, Identifiable { + case skip + case overwrite + case rename + + var id: String { rawValue } + + var displayName: String { + switch self { + case .skip: return AgentText("Keep Hermes skills", "技能保留 Hermes") + case .overwrite: return AgentText("Overwrite Hermes skills", "技能覆盖 Hermes") + case .rename: return AgentText("Rename imported skills", "技能重命名导入") + } + } + + var help: String { + switch self { + case .skip: return AgentText("Keep existing Hermes skills when names conflict; target-level config conflicts follow Hermes migration rules.", "遇到同名技能时保留目标 Hermes 版本;目标级配置冲突会按 Hermes 迁移规则覆盖") + case .overwrite: return AgentText("Overwrite conflicting Hermes skills with OpenClaw versions; target-level 
config conflicts follow Hermes migration rules.", "遇到同名技能时使用 OpenClaw 版本覆盖;目标级配置冲突会按 Hermes 迁移规则覆盖") + case .rename: return AgentText("Import conflicting OpenClaw skills under renamed copies; target-level config conflicts follow Hermes migration rules.", "遇到同名技能时将 OpenClaw 版本重命名导入;目标级配置冲突会按 Hermes 迁移规则覆盖") + } + } +} + +struct AgentMigrationOptions: Equatable { + var skillConflictStrategy: AgentSkillConflictStrategy = .skip + var workspaceTarget: String = "/home/tenbox/.hermes/workspace/openclaw-migrated" +} + +enum AgentMigrationStep: String { + case backup + case exportSource + case dryRun + case migrate + case restart + case health + case complete + + var title: String { + switch self { + case .backup: return AgentText("Back up Hermes", "备份 Hermes") + case .exportSource: return AgentText("Export OpenClaw", "导出 OpenClaw") + case .dryRun: return AgentText("Check migration plan", "检查迁移计划") + case .migrate: return AgentText("Run migration", "执行迁移") + case .restart: return AgentText("Restart Hermes", "重启 Hermes") + case .health: return AgentText("Health check", "健康检查") + case .complete: return AgentText("Complete", "完成") + } + } +} + +struct AgentMigrationProgress: Identifiable, Equatable { + let id = UUID() + let step: AgentMigrationStep + let message: String + let detail: String? + let date: Date + + init(step: AgentMigrationStep, message: String, detail: String? = nil, date: Date = Date()) { + self.step = step + self.message = message + self.detail = detail + self.date = date + } +} + +struct AgentBackupPackage: Identifiable, Equatable { + let url: URL + let modifiedAt: Date + let sizeBytes: Int64 + + var id: String { url.path } + var filename: String { url.lastPathComponent } +} + +struct AgentBackupSchedule: Codable, Equatable { + static let defaultHour = 3 + static let defaultMinute = 0 + static let defaultKeepCount = 7 + + var enabled: Bool + var hour: Int + var minute: Int + var keepCount: Int + var lastRunDate: String? + var lastAttemptAt: String? 
+ var lastAttemptStatus: String? + var lastAttemptMessage: String? + + init(enabled: Bool = false, + hour: Int = Self.defaultHour, + minute: Int = Self.defaultMinute, + keepCount: Int = Self.defaultKeepCount, + lastRunDate: String? = nil, + lastAttemptAt: String? = nil, + lastAttemptStatus: String? = nil, + lastAttemptMessage: String? = nil) { + self.enabled = enabled + self.hour = min(max(hour, 0), 23) + self.minute = min(max(minute, 0), 59) + self.keepCount = min(max(keepCount, 1), 99) + self.lastRunDate = lastRunDate + self.lastAttemptAt = lastAttemptAt + self.lastAttemptStatus = lastAttemptStatus + self.lastAttemptMessage = lastAttemptMessage + } + + var timeText: String { + String(format: "%02d:%02d", hour, minute) + } +} + +struct ConsoleCommandError: LocalizedError { + let errorDescription: String? + + init(_ message: String) { + self.errorDescription = message + } +} + +final class AgentToolsService { + private let fileManager = FileManager.default + + func exportProfile(vm: VmInfo, session: VmSession, appState: AppState, agent: AgentKind, + destinationURL: URL, + completion: @escaping (Result) -> Void) { + withOperationShare(vmId: vm.id, appState: appState) { share, cleanup in + do { + try self.prepareAgentToolScript(share: share) + let packageName = destinationURL.lastPathComponent.isEmpty + ? 
"\(vm.name)-\(agent.rawValue)-profile.tar.gz" + : destinationURL.lastPathComponent + let guestPackage = "/mnt/shared/\(share.tag)/\(packageName)" + let command = Self.scriptCommand(tag: share.tag, args: ["export-profile", agent.rawValue, guestPackage, "migration"]) + session.runGuestAgentCommand(command, timeout: 420) { result in + switch result { + case .success(let commandResult): + guard commandResult.exitCode == 0 else { + cleanup() + completion(.failure(Self.makeToolError(commandResult, fallback: AgentText("Agent export failed", "Agent 数据导出失败")))) + return + } + let hostPackage = URL(fileURLWithPath: share.hostPath).appendingPathComponent(packageName) + do { + if self.fileManager.fileExists(atPath: destinationURL.path) { try self.fileManager.removeItem(at: destinationURL) } + try self.fileManager.copyItem(at: hostPackage, to: destinationURL) + cleanup() + completion(.success(AgentToolResult(message: AgentText("Exported to \(destinationURL.path)", "已导出到 \(destinationURL.path)"), output: commandResult.output))) + } catch { + cleanup() + completion(.failure(error)) + } + case .failure(let error): + cleanup() + completion(.failure(Self.localizedGuestError(error))) + } + } + } catch { + cleanup() + completion(.failure(error)) + } + } failure: { completion(.failure($0)) } + } + + func importProfile(vm: VmInfo, session: VmSession, appState: AppState, agent: AgentKind, + sourceURL: URL, + completion: @escaping (Result) -> Void) { + withOperationShare(vmId: vm.id, appState: appState) { share, cleanup in + do { + try self.prepareAgentToolScript(share: share) + let packageName = "tenbox-agent-profile-import.tar.gz" + let hostPackage = URL(fileURLWithPath: share.hostPath).appendingPathComponent(packageName) + if self.fileManager.fileExists(atPath: hostPackage.path) { try self.fileManager.removeItem(at: hostPackage) } + try self.fileManager.copyItem(at: sourceURL, to: hostPackage) + let guestPackage = "/mnt/shared/\(share.tag)/\(packageName)" + let command = 
Self.scriptCommand(tag: share.tag, args: ["import-profile", agent.rawValue, guestPackage]) + session.runGuestAgentCommand(command, timeout: 420) { result in + cleanup() + switch result { + case .success(let commandResult): + guard commandResult.exitCode == 0 else { + completion(.failure(Self.makeToolError(commandResult, fallback: AgentText("Agent import failed", "Agent 数据导入失败")))) + return + } + completion(.success(AgentToolResult(message: AgentText("Agent data imported", "已导入 Agent 数据"), output: commandResult.output))) + case .failure(let error): + completion(.failure(Self.localizedGuestError(error))) + } + } + } catch { + cleanup() + completion(.failure(error)) + } + } failure: { completion(.failure($0)) } + } + + func backupStatus(vm: VmInfo, session: VmSession, appState: AppState, agent: AgentKind, + completion: @escaping (Result) -> Void) { + do { + let latest = try latestBackupPackage(vmId: vm.id, agent: agent) + completion(.success(AgentToolResult( + message: latest == nil ? AgentText("No backups yet", "还没有备份") : AgentText("Agent data is protected", "Agent 数据已保护"), + output: latest == nil ? 
AgentText("Create the first backup with Back Up Now.", "点击“立即备份”创建第一份备份。") : AgentText("Latest backup: \(latest!.path)", "最近备份:\(latest!.path)") + ))) + } catch { completion(.failure(error)) } + } + + func snapshotBackup(vm: VmInfo, session: VmSession, appState: AppState, agent: AgentKind, + keepCount: Int = AgentBackupSchedule.defaultKeepCount, + completion: @escaping (Result) -> Void) { + do { + let package = try backupPackageURL(vmId: vm.id, agent: agent) + withBackupShare(vmId: vm.id, appState: appState) { share, cleanup in + do { + try self.prepareAgentToolScript(share: share) + let guestDir = "/mnt/shared/\(share.tag)/\(agent.rawValue)" + let guestPackage = "\(guestDir)/\(package.lastPathComponent)" + let command = Self.withSharedFolderReady(tag: share.tag, body: "mkdir -p \(Self.shellQuote(guestDir))\n" + Self.scriptInvocation(tag: share.tag, args: ["export-profile", agent.rawValue, guestPackage, "backup"])) + session.runGuestAgentCommand(command, timeout: 420) { result in + cleanup() + switch result { + case .success(let commandResult): + guard commandResult.exitCode == 0 else { + completion(.failure(Self.makeToolError(commandResult, fallback: AgentText("Agent backup failed", "Agent 备份失败")))) + return + } + self.rotateBackups(vmId: vm.id, agent: agent, keep: keepCount) + completion(.success(AgentToolResult(message: AgentText("Agent data backup created", "已创建 Agent 数据备份"), output: package.path))) + case .failure(let error): + completion(.failure(Self.localizedGuestError(error))) + } + } + } catch { + cleanup() + completion(.failure(error)) + } + } failure: { completion(.failure($0)) } + } catch { completion(.failure(error)) } + } + + func restoreBackup(vm: VmInfo, session: VmSession, appState: AppState, agent: AgentKind, + packageURL: URL, + completion: @escaping (Result) -> Void) { + withBackupShare(vmId: vm.id, appState: appState) { share, cleanup in + do { + try self.prepareAgentToolScript(share: share) + let guestPackage = 
"/mnt/shared/\(share.tag)/\(agent.rawValue)/\(packageURL.lastPathComponent)" + let command = Self.scriptCommand(tag: share.tag, args: ["import-profile", agent.rawValue, guestPackage]) + session.runGuestAgentCommand(command, timeout: 420) { result in + cleanup() + switch result { + case .success(let commandResult): + guard commandResult.exitCode == 0 else { + completion(.failure(Self.makeToolError(commandResult, fallback: AgentText("Agent backup restore failed", "Agent 备份恢复失败")))) + return + } + completion(.success(AgentToolResult(message: AgentText("Agent data backup restored", "已恢复 Agent 数据备份"), output: packageURL.path))) + case .failure(let error): + completion(.failure(Self.localizedGuestError(error))) + } + } + } catch { + cleanup() + completion(.failure(error)) + } + } failure: { completion(.failure($0)) } + } + + func healthStatus(vm: VmInfo, session: VmSession, appState: AppState, agent: AgentKind, + completion: @escaping (Result) -> Void) { + runHealthCommand(vm: vm, session: session, appState: appState, agent: agent, + args: ["health", agent.rawValue], + failureFallback: AgentText("Agent health check failed", "Agent 健康检查失败"), + successMessage: AgentText("Health status refreshed", "健康状态已更新"), + completion: completion) + } + + func restartAgent(vm: VmInfo, session: VmSession, appState: AppState, agent: AgentKind, + keepCount: Int = AgentBackupSchedule.defaultKeepCount, + completion: @escaping (Result) -> Void) { + runRepairCommand(vm: vm, session: session, appState: appState, agent: agent, + repairArgs: ["restart", agent.rawValue], + successMessage: AgentText("Agent restarted", "已重新启动 Agent"), + keepCount: keepCount, + completion: completion) + } + + func testModel(vm: VmInfo, session: VmSession, appState: AppState, agent: AgentKind, + completion: @escaping (Result) -> Void) { + runHealthCommand(vm: vm, session: session, appState: appState, agent: agent, + args: ["test-model", agent.rawValue], + failureFallback: AgentText("Model connection test failed", 
"模型连接测试失败"), + successMessage: AgentText("Model connection tested", "模型连接已测试"), + completion: completion) + } + + func resetAgentConfig(vm: VmInfo, session: VmSession, appState: AppState, agent: AgentKind, + keepCount: Int = AgentBackupSchedule.defaultKeepCount, + completion: @escaping (Result) -> Void) { + runRepairCommand(vm: vm, session: session, appState: appState, agent: agent, + repairArgs: ["reset-config", agent.rawValue], + successMessage: AgentText("Agent configuration reset", "已重置 Agent 配置"), + keepCount: keepCount, + completion: completion) + } + + func exportDiagnostics(vm: VmInfo, session: VmSession, appState: AppState, agent: AgentKind, + completion: @escaping (Result) -> Void) { + withBackupShare(vmId: vm.id, appState: appState) { share, cleanup in + do { + try self.prepareAgentToolScript(share: share) + let guestDir = "/mnt/shared/\(share.tag)" + let command = Self.scriptCommand(tag: share.tag, args: ["diagnostics", agent.rawValue, guestDir]) + session.runGuestAgentCommand(command, timeout: 180) { result in + cleanup() + switch result { + case .success(let commandResult): + guard commandResult.exitCode == 0 else { + completion(.failure(Self.makeToolError(commandResult, fallback: AgentText("Agent diagnostics export failed", "Agent 诊断导出失败")))) + return + } + completion(.success(AgentToolResult(message: AgentText("Diagnostics exported", "已导出诊断包"), output: commandResult.output))) + case .failure(let error): + completion(.failure(Self.localizedGuestError(error))) + } + } + } catch { + cleanup() + completion(.failure(error)) + } + } failure: { completion(.failure($0)) } + } + + func migrateOpenClawToHermes(sourceVm: VmInfo, sourceSession: VmSession, + targetVm: VmInfo, targetSession: VmSession, + appState: AppState, + options: AgentMigrationOptions = AgentMigrationOptions(), + keepCount: Int = AgentBackupSchedule.defaultKeepCount, + progress: @escaping (AgentMigrationProgress) -> Void = { _ in }, + completion: @escaping (Result) -> Void) { + let emit: 
(AgentMigrationStep, String, String?) -> Void = { step, message, detail in + DispatchQueue.main.async { progress(AgentMigrationProgress(step: step, message: message, detail: detail)) } + } + do { + let backupPackage = try backupPackageURL(vmId: targetVm.id, agent: .hermes) + let reportURL = try migrationReportURL(vmId: targetVm.id, agent: .hermes) + withBackupShare(vmId: targetVm.id, appState: appState) { backupShare, backupCleanup in + do { try self.prepareAgentToolScript(share: backupShare) } catch { backupCleanup(); completion(.failure(error)); return } + withOperationShare(vmIds: [sourceVm.id, targetVm.id], appState: appState) { share, cleanup in + do { try self.prepareAgentToolScript(share: share) } catch { cleanup(); backupCleanup(); completion(.failure(error)); return } + let cleanupAll = { cleanup(); backupCleanup() } + let guestBackupDir = "/mnt/shared/\(backupShare.tag)/hermes" + let guestBackup = "\(guestBackupDir)/\(backupPackage.lastPathComponent)" + let guestReport = "\(guestBackupDir)/\(reportURL.lastPathComponent)" + let backupCommand = Self.withSharedFolderReady(tag: backupShare.tag, body: "mkdir -p \(Self.shellQuote(guestBackupDir))\n" + Self.scriptInvocation(tag: backupShare.tag, args: ["export-profile", AgentKind.hermes.rawValue, guestBackup, "backup"])) + emit(.backup, AgentText("Creating target Hermes pre-migration backup", "正在创建目标 Hermes 迁移前备份"), backupPackage.lastPathComponent) + targetSession.runGuestAgentCommand(backupCommand, timeout: 420) { backupResult in + switch backupResult { + case .success(let backupCommandResult): + guard backupCommandResult.exitCode == 0 else { cleanupAll(); completion(.failure(Self.makeToolError(backupCommandResult, fallback: AgentText("Hermes pre-migration backup failed", "Hermes 迁移前备份失败")))); return } + let archivePath = "/mnt/shared/\(share.tag)/openclaw-source.tar.gz" + let exportCommand = Self.scriptCommand(tag: share.tag, args: ["export-openclaw-source", archivePath]) + emit(.exportSource, 
AgentText("Exporting OpenClaw user data from source VM", "正在从来源 VM 导出 OpenClaw 用户数据"), sourceVm.name) + sourceSession.runGuestAgentCommand(exportCommand, timeout: 420) { sourceResult in + switch sourceResult { + case .success(let sourceCommandResult): + guard sourceCommandResult.exitCode == 0 else { cleanupAll(); completion(.failure(Self.makeToolError(sourceCommandResult, fallback: AgentText("OpenClaw data export failed", "OpenClaw 数据导出失败")))); return } + let dryRunCommand = Self.scriptCommand(tag: share.tag, args: ["migrate-openclaw-dry-run", archivePath, guestReport, options.skillConflictStrategy.rawValue, options.workspaceTarget]) + emit(.dryRun, AgentText("Generating official dry-run migration plan", "正在生成官方 dry-run 迁移计划"), AgentText("Conflict strategy: \(options.skillConflictStrategy.displayName)", "冲突策略:\(options.skillConflictStrategy.displayName)")) + targetSession.runGuestAgentCommand(dryRunCommand, timeout: 420) { dryRunResult in + switch dryRunResult { + case .success(let dryRunCommandResult): + guard dryRunCommandResult.exitCode == 0 else { cleanupAll(); completion(.failure(Self.makeToolError(dryRunCommandResult, fallback: AgentText("OpenClaw to Hermes migration preflight failed", "OpenClaw 到 Hermes 迁移预检失败")))); return } + emit(.migrate, AgentText("Dry run passed; applying migration", "dry-run 已通过,正在执行正式迁移"), reportURL.lastPathComponent) + let migrateCommand = Self.scriptCommand(tag: share.tag, args: ["migrate-openclaw-apply", archivePath, guestReport, options.skillConflictStrategy.rawValue, options.workspaceTarget]) + targetSession.runGuestAgentCommand(migrateCommand, timeout: 600) { targetResult in + cleanupAll() + switch targetResult { + case .success(let targetCommandResult): + guard targetCommandResult.exitCode == 0 else { completion(.failure(Self.makeToolError(targetCommandResult, fallback: AgentText("OpenClaw to Hermes migration failed", "OpenClaw 到 Hermes 迁移失败")))); return } + self.rotateBackups(vmId: targetVm.id, agent: .hermes, keep: keepCount) 
+ emit(.complete, AgentText("Migration completed; report saved", "迁移完成,报告已保存"), reportURL.lastPathComponent) + completion(.success(AgentToolResult(message: AgentText("OpenClaw to Hermes migration completed", "已完成 OpenClaw 到 Hermes 迁移"), output: """ + \(AgentText("Pre-migration backup: ", "迁移前备份:"))\(backupPackage.path) + \(AgentText("Migration report: ", "迁移报告:"))\(reportURL.path) + \(AgentText("Source VM: ", "来源 VM:"))\(sourceVm.name) + \(AgentText("Target VM: ", "目标 VM:"))\(targetVm.name) + \(AgentText("Conflict strategy: ", "冲突策略:"))\(options.skillConflictStrategy.displayName) + \(AgentText("Workspace target: ", "Workspace 目标:"))\(options.workspaceTarget.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty ? AgentText("Default", "默认") : options.workspaceTarget) + + [health] + \(Self.compactMigrationOutput(targetCommandResult.output) ?? AgentText("Migration command completed", "迁移命令已完成")) + """))) + case .failure(let error): completion(.failure(Self.localizedGuestError(error))) + } + } + case .failure(let error): cleanupAll(); completion(.failure(Self.localizedGuestError(error))) + } + } + case .failure(let error): cleanupAll(); completion(.failure(Self.localizedGuestError(error))) + } + } + case .failure(let error): cleanupAll(); completion(.failure(Self.localizedGuestError(error))) + } + } + } failure: { error in backupCleanup(); completion(.failure(error)) } + } failure: { completion(.failure($0)) } + } catch { completion(.failure(error)) } + } + + private func runHealthCommand(vm: VmInfo, session: VmSession, appState: AppState, agent: AgentKind, + args: [String], failureFallback: String, successMessage: String, + completion: @escaping (Result) -> Void) { + withOperationShare(vmId: vm.id, appState: appState) { share, cleanup in + do { + try self.prepareAgentToolScript(share: share) + session.runGuestAgentCommand(Self.scriptCommand(tag: share.tag, args: args), timeout: 180) { result in + cleanup() + switch result { + case .success(let commandResult): + guard 
commandResult.exitCode == 0 else { completion(.failure(Self.makeToolError(commandResult, fallback: failureFallback))); return } + completion(.success(AgentToolResult(message: successMessage, output: commandResult.output))) + case .failure(let error): completion(.failure(Self.localizedGuestError(error))) + } + } + } catch { cleanup(); completion(.failure(error)) } + } failure: { completion(.failure($0)) } + } + + private func runRepairCommand(vm: VmInfo, session: VmSession, appState: AppState, agent: AgentKind, + repairArgs: [String], successMessage: String, + keepCount: Int = AgentBackupSchedule.defaultKeepCount, + completion: @escaping (Result) -> Void) { + do { + let package = try backupPackageURL(vmId: vm.id, agent: agent) + withBackupShare(vmId: vm.id, appState: appState) { share, cleanup in + do { + try self.prepareAgentToolScript(share: share) + let guestDir = "/mnt/shared/\(share.tag)/\(agent.rawValue)" + let guestPackage = "\(guestDir)/\(package.lastPathComponent)" + let command = Self.withSharedFolderReady(tag: share.tag, body: "mkdir -p \(Self.shellQuote(guestDir))\n" + Self.scriptInvocation(tag: share.tag, args: ["export-profile", agent.rawValue, guestPackage, "backup"]) + "\n" + Self.scriptInvocation(tag: share.tag, args: repairArgs)) + session.runGuestAgentCommand(command, timeout: 420) { result in + cleanup() + switch result { + case .success(let commandResult): + guard commandResult.exitCode == 0 else { completion(.failure(Self.makeToolError(commandResult, fallback: AgentText("Agent repair operation failed", "Agent 修复操作失败")))); return } + self.rotateBackups(vmId: vm.id, agent: agent, keep: keepCount) + completion(.success(AgentToolResult(message: successMessage, output: "\(AgentText("Pre-repair backup: ", "修复前备份:"))\(package.path)\n\(commandResult.output)"))) + case .failure(let error): completion(.failure(Self.localizedGuestError(error))) + } + } + } catch { cleanup(); completion(.failure(error)) } + } failure: { completion(.failure($0)) } + } catch { 
completion(.failure(error)) } + } + + private func withOperationShare(vmId: String, appState: AppState, + perform: (SharedFolder, @escaping () -> Void) -> Void, + failure: (Error) -> Void) { + withOperationShare(vmIds: [vmId], appState: appState, perform: perform, failure: failure) + } + + private func withOperationShare(vmIds: [String], appState: AppState, + perform: (SharedFolder, @escaping () -> Void) -> Void, + failure: (Error) -> Void) { + do { + let base = try operationBaseDirectory() + let tag = "tenbox-agent-ops-\(UUID().uuidString.prefix(8).lowercased())" + let dirName = "\(vmIds.joined(separator: "-"))-\(tag)" + let dir = base.appendingPathComponent(dirName, isDirectory: true) + try fileManager.createDirectory(at: dir, withIntermediateDirectories: true) + let share = SharedFolder(tag: tag, hostPath: dir.path, readonly: false) + for vmId in vmIds { + appState.addRuntimeSharedFolder(share, toVm: vmId) + } + + let cleanup: () -> Void = { [weak appState, weak self] in + DispatchQueue.main.async { + for vmId in vmIds { + appState?.removeRuntimeSharedFolder(tag: tag, fromVm: vmId) + } + try? 
self?.fileManager.removeItem(at: dir) + } + } + perform(share, cleanup) + } catch { + failure(error) + } + } + + private func withBackupShare(vmId: String, appState: AppState, + perform: (SharedFolder, @escaping () -> Void) -> Void, + failure: (Error) -> Void) { + do { + let dir = try backupDirectory(vmId: vmId) + let tag = "tenbox-agent-backups-\(UUID().uuidString.prefix(8).lowercased())" + let share = SharedFolder(tag: tag, hostPath: dir.path, readonly: false) + appState.addRuntimeSharedFolder(share, toVm: vmId) + let cleanup: () -> Void = { [weak appState] in + DispatchQueue.main.async { + appState?.removeRuntimeSharedFolder(tag: tag, fromVm: vmId) + } + } + perform(share, cleanup) + } catch { + failure(error) + } + } + + private func operationBaseDirectory() throws -> URL { + let appSupport = try fileManager.url( + for: .applicationSupportDirectory, + in: .userDomainMask, + appropriateFor: nil, + create: true + ) + let dir = appSupport.appendingPathComponent("TenBox/AgentOperations", isDirectory: true) + try fileManager.createDirectory(at: dir, withIntermediateDirectories: true) + return dir + } + + private func backupDirectory(vmId: String) throws -> URL { + let appSupport = try fileManager.url( + for: .applicationSupportDirectory, + in: .userDomainMask, + appropriateFor: nil, + create: true + ) + let dir = appSupport.appendingPathComponent("TenBox/AgentBackups/\(vmId)", isDirectory: true) + try fileManager.createDirectory(at: dir, withIntermediateDirectories: true) + return dir + } + + private func backupPackageDirectory(vmId: String, agent: AgentKind) throws -> URL { + let dir = try backupDirectory(vmId: vmId).appendingPathComponent(agent.rawValue, isDirectory: true) + try fileManager.createDirectory(at: dir, withIntermediateDirectories: true) + return dir + } + + private func backupPackageURL(vmId: String, agent: AgentKind) throws -> URL { + let formatter = DateFormatter() + formatter.calendar = Calendar(identifier: .gregorian) + formatter.locale = 
Locale(identifier: "en_US_POSIX") + formatter.dateFormat = "yyyy-MM-dd-HHmmss" + return try backupPackageDirectory(vmId: vmId, agent: agent) + .appendingPathComponent("agent-data-\(formatter.string(from: Date())).tar.gz") + } + + private func migrationReportURL(vmId: String, agent: AgentKind) throws -> URL { + let formatter = DateFormatter() + formatter.calendar = Calendar(identifier: .gregorian) + formatter.locale = Locale(identifier: "en_US_POSIX") + formatter.dateFormat = "yyyy-MM-dd-HHmmss" + return try backupPackageDirectory(vmId: vmId, agent: agent) + .appendingPathComponent("openclaw-migration-\(formatter.string(from: Date())).txt") + } + + func listBackupPackages(vmId: String, agent: AgentKind) throws -> [AgentBackupPackage] { + let dir = try backupPackageDirectory(vmId: vmId, agent: agent) + let items = (try? fileManager.contentsOfDirectory( + at: dir, + includingPropertiesForKeys: [.contentModificationDateKey, .fileSizeKey], + options: [.skipsHiddenFiles] + )) ?? [] + return items + .filter { $0.pathExtension == "gz" && $0.lastPathComponent.hasPrefix("agent-data-") } + .map { url in + let values = try? url.resourceValues(forKeys: [.contentModificationDateKey, .fileSizeKey]) + return AgentBackupPackage( + url: url, + modifiedAt: values?.contentModificationDate ?? .distantPast, + sizeBytes: Int64(values?.fileSize ?? 0) + ) + } + .sorted { $0.modifiedAt > $1.modifiedAt } + } + + private func latestBackupPackage(vmId: String, agent: AgentKind) throws -> URL? { + try listBackupPackages(vmId: vmId, agent: agent).first?.url + } + + func rotateBackups(vmId: String, agent: AgentKind, keep: Int) { + guard let packages = try? listBackupPackages(vmId: vmId, agent: agent) else { return } + for old in packages.dropFirst(keep) { + try? 
fileManager.removeItem(at: old.url) + } + } + + private func prepareAgentToolScript(share: SharedFolder) throws { + let source = try Self.agentToolScriptURL() + let destination = URL(fileURLWithPath: share.hostPath).appendingPathComponent("agent_tools.sh") + if fileManager.fileExists(atPath: destination.path) { try fileManager.removeItem(at: destination) } + try fileManager.copyItem(at: source, to: destination) + } + + private static func agentToolScriptURL() throws -> URL { + let candidates: [URL?] = [ + Bundle.main.url(forResource: "agent_tools", withExtension: "sh", subdirectory: "AgentTools"), + Bundle.module.url(forResource: "agent_tools", withExtension: "sh", subdirectory: "AgentTools"), + URL(fileURLWithPath: #filePath) + .deletingLastPathComponent() + .deletingLastPathComponent() + .deletingLastPathComponent() + .appendingPathComponent("agent_tools/guest/agent_tools.sh") + ] + for candidate in candidates.compactMap({ $0 }) where FileManager.default.isReadableFile(atPath: candidate.path) { + return candidate + } + throw ConsoleCommandError(AgentText("Agent tools script was not found.", "找不到 Agent 工具箱脚本。")) + } + + private static func withSharedFolderReady(tag: String, body: String) -> String { + let path = "/mnt/shared/\(tag)" + return """ + set -eu + share_dir=\(shellQuote(path)) + i=0 + while [ "$i" -lt 100 ]; do + if [ -d "$share_dir" ] && [ -w "$share_dir" ]; then break; fi + i=$((i + 1)); sleep 0.2 + done + [ -d "$share_dir" ] || { echo "Shared folder is not mounted: $share_dir" >&2; exit 1; } + [ -w "$share_dir" ] || { echo "Shared folder is not writable: $share_dir" >&2; exit 1; } + \(body) + """ + } + + private static func scriptInvocation(tag: String, args: [String]) -> String { + let script = "/mnt/shared/\(tag)/agent_tools.sh" + let quotedArgs = args.map(shellQuote).joined(separator: " ") + return """ + script=\(shellQuote(script)) + [ -f "$script" ] || { echo "Agent tools script is missing: $script" >&2; exit 1; } + chmod +x "$script" 2>/dev/null 
|| true + /bin/sh "$script" \(quotedArgs) + """ + } + + private static func scriptCommand(tag: String, args: [String]) -> String { + withSharedFolderReady(tag: tag, body: scriptInvocation(tag: tag, args: args)) + } + + private static func compactMigrationOutput(_ output: String) -> String? { + let lines = output + .split(whereSeparator: { $0.isNewline }) + .map { String($0).trimmingCharacters(in: .whitespaces) } + .filter { !$0.isEmpty } + guard !lines.isEmpty else { return nil } + return lines.prefix(8).joined(separator: "\n") + } + + private static func shellQuote(_ value: String) -> String { + "'" + value.replacingOccurrences(of: "'", with: "'\\''") + "'" + } + + private static func makeToolError(_ result: ConsoleCommandResult, fallback: String) -> Error { + let message = result.output.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty ? fallback : result.output + return ConsoleCommandError(localizedGuestMessage(message)) + } + + private static func localizedGuestError(_ error: Error) -> Error { + ConsoleCommandError(localizedGuestMessage(error.localizedDescription)) + } + + private static func localizedGuestMessage(_ message: String) -> String { + if message.contains("Agent tools require a Linux guest OS") || message.contains("/bin/sh") || message.contains("No such file") { + return AgentText("Agent tools require a Linux guest OS.", "Agent 工具箱需要 Linux Guest OS。") + } + return message + } +} diff --git a/src/manager-macos/TenBoxApp.swift b/src/manager-macos/TenBoxApp.swift index 982e1eb..6dc2ba9 100644 --- a/src/manager-macos/TenBoxApp.swift +++ b/src/manager-macos/TenBoxApp.swift @@ -143,25 +143,32 @@ class AppState: ObservableObject { @Published var showForceStopConfirm = false @Published var showSharedFoldersSheet = false @Published var showPortForwardsSheet = false + @Published var showAgentToolsSheet = false @Published var startVmError: String? @Published var hostForwardError: String? 
@Published var llmMappings: [LlmModelMapping] = [] @Published var llmLoggingEnabled = false + @Published var agentBackupSchedules: [String: AgentBackupSchedule] = [:] let llmProxy = LlmProxyService() + private let agentTools = AgentToolsService() private static let kLlmGuestIp = "10.0.2.3" private static let kLlmGuestPort: UInt16 = 80 private var bridge = TenBoxBridgeWrapper() let clipboardHandler = ClipboardHandler() private var activeSessions: [String: VmSession] = [:] + private var runtimeSharedFolders: [String: [SharedFolder]] = [:] private var sessionCancellables: [String: AnyCancellable] = [:] private var stateObserver: NSObjectProtocol? private var workspaceWakeObserver: NSObjectProtocol? + private var agentBackupTimer: Timer? + private var scheduledBackupsRunning: Set = [] private var pendingVmStartId: String? private var sleepAssertionID: IOPMAssertionID = IOPMAssertionID(0) init() { + bridge.configStore.purgeAgentToolSharedFolders() refreshVmList() NSLog("[TenBoxApp] Loaded %d VM(s):", vms.count) for vm in vms { @@ -169,6 +176,8 @@ class AppState: ObservableObject { } loadLlmMappings() startLlmProxyIfNeeded() + loadAgentBackupSchedules() + startAgentBackupScheduler() setupClipboard() stateObserver = NotificationCenter.default.addObserver( forName: NSNotification.Name("TenBoxVmStateChanged"), @@ -224,6 +233,7 @@ class AppState: ObservableObject { deinit { clipboardHandler.stopMonitoring() + agentBackupTimer?.invalidate() releaseSleepAssertion() if let obs = stateObserver { NotificationCenter.default.removeObserver(obs) @@ -429,6 +439,20 @@ class AppState: ObservableObject { sendSharedFoldersUpdateIfRunning(vmId: vmId) } + func addRuntimeSharedFolder(_ folder: SharedFolder, toVm vmId: String) { + runtimeSharedFolders[vmId, default: []].removeAll { $0.tag == folder.tag } + runtimeSharedFolders[vmId, default: []].append(folder) + sendSharedFoldersUpdateIfRunning(vmId: vmId) + } + + func removeRuntimeSharedFolder(tag: String, fromVm vmId: String) { + 
runtimeSharedFolders[vmId]?.removeAll { $0.tag == tag } + if runtimeSharedFolders[vmId]?.isEmpty == true { + runtimeSharedFolders.removeValue(forKey: vmId) + } + sendSharedFoldersUpdateIfRunning(vmId: vmId) + } + func addHostForward(_ pf: HostForward, toVm vmId: String) { _ = bridge.addHostForward(pf, toVm: vmId) refreshVmList() @@ -453,6 +477,354 @@ class AppState: ObservableObject { sendNetworkUpdateIfRunning(vmId: vmId) } + func exportAgentProfile(vmId: String, agent: AgentKind, destinationURL: URL, + completion: @escaping (Result) -> Void) { + guard let vm = vms.first(where: { $0.id == vmId }) else { + completion(.failure(ConsoleCommandError("找不到 VM"))) + return + } + let session = getOrCreateSession(for: vmId) + agentTools.exportProfile(vm: vm, session: session, appState: self, agent: agent, + destinationURL: destinationURL, completion: completion) + } + + func importAgentProfile(vmId: String, agent: AgentKind, sourceURL: URL, + completion: @escaping (Result) -> Void) { + guard let vm = vms.first(where: { $0.id == vmId }) else { + completion(.failure(ConsoleCommandError("找不到 VM"))) + return + } + let session = getOrCreateSession(for: vmId) + agentTools.importProfile(vm: vm, session: session, appState: self, agent: agent, + sourceURL: sourceURL, completion: completion) + } + + func migrateOpenClawToHermes(sourceVmId: String, targetVmId: String, + options: AgentMigrationOptions = AgentMigrationOptions(), + progress: @escaping (AgentMigrationProgress) -> Void = { _ in }, + completion: @escaping (Result) -> Void) { + guard sourceVmId != targetVmId else { + completion(.failure(ConsoleCommandError(AgentText("Source VM and target VM cannot be the same.", "来源 VM 和目标 VM 不能相同")))) + return + } + guard let sourceVm = vms.first(where: { $0.id == sourceVmId }) else { + completion(.failure(ConsoleCommandError(AgentText("OpenClaw source VM was not found.", "找不到 OpenClaw 来源 VM")))) + return + } + guard let targetVm = vms.first(where: { $0.id == targetVmId }) else { + 
completion(.failure(ConsoleCommandError(AgentText("Hermes target VM was not found.", "找不到 Hermes 目标 VM")))) + return + } + guard sourceVm.state == .running else { + completion(.failure(ConsoleCommandError(AgentText("OpenClaw source VM is not running.", "OpenClaw 来源 VM 未运行")))) + return + } + guard targetVm.state == .running else { + completion(.failure(ConsoleCommandError(AgentText("Hermes target VM is not running.", "Hermes 目标 VM 未运行")))) + return + } + + let sourceSession = getOrCreateSession(for: sourceVmId) + let targetSession = getOrCreateSession(for: targetVmId) + if !sourceSession.connected || !sourceSession.ipcClient.isConnected { + sourceSession.connectIfNeeded() + completion(.failure(ConsoleCommandError(AgentText("OpenClaw source VM execution channel is not connected. Try again shortly.", "OpenClaw 来源 VM 执行通道未连接,请稍后重试")))) + return + } + guard sourceSession.guestAgentConnected else { + completion(.failure(ConsoleCommandError(AgentText("OpenClaw source VM Guest Agent is not connected.", "OpenClaw 来源 VM Guest Agent 未连接")))) + return + } + if !targetSession.connected || !targetSession.ipcClient.isConnected { + targetSession.connectIfNeeded() + completion(.failure(ConsoleCommandError(AgentText("Hermes target VM execution channel is not connected. 
Try again shortly.", "Hermes 目标 VM 执行通道未连接,请稍后重试")))) + return + } + guard targetSession.guestAgentConnected else { + completion(.failure(ConsoleCommandError(AgentText("Hermes target VM Guest Agent is not connected.", "Hermes 目标 VM Guest Agent 未连接")))) + return + } + + agentTools.migrateOpenClawToHermes(sourceVm: sourceVm, + sourceSession: sourceSession, + targetVm: targetVm, + targetSession: targetSession, + appState: self, + options: options, + keepCount: agentBackupSchedule(vmId: targetVmId, agent: .hermes).keepCount, + progress: progress, + completion: completion) + } + + func agentBackupStatus(vmId: String, agent: AgentKind, + completion: @escaping (Result) -> Void) { + guard let vm = vms.first(where: { $0.id == vmId }) else { + completion(.failure(ConsoleCommandError("找不到 VM"))) + return + } + let session = getOrCreateSession(for: vmId) + agentTools.backupStatus(vm: vm, session: session, appState: self, agent: agent, + completion: completion) + } + + func listAgentBackups(vmId: String, agent: AgentKind) -> [AgentBackupPackage] { + (try? agentTools.listBackupPackages(vmId: vmId, agent: agent)) ?? 
[] + } + + func snapshotAgentBackup(vmId: String, agent: AgentKind, + completion: @escaping (Result) -> Void) { + guard let vm = vms.first(where: { $0.id == vmId }) else { + completion(.failure(ConsoleCommandError("找不到 VM"))) + return + } + let session = getOrCreateSession(for: vmId) + agentTools.snapshotBackup(vm: vm, session: session, appState: self, agent: agent, + keepCount: agentBackupSchedule(vmId: vmId, agent: agent).keepCount, + completion: completion) + } + + func restoreAgentBackup(vmId: String, agent: AgentKind, packageURL: URL, + completion: @escaping (Result) -> Void) { + guard let vm = vms.first(where: { $0.id == vmId }) else { + completion(.failure(ConsoleCommandError("找不到 VM"))) + return + } + let session = getOrCreateSession(for: vmId) + agentTools.restoreBackup(vm: vm, session: session, appState: self, agent: agent, + packageURL: packageURL, completion: completion) + } + + func agentHealthStatus(vmId: String, agent: AgentKind, + completion: @escaping (Result) -> Void) { + guard let vm = vms.first(where: { $0.id == vmId }) else { + completion(.failure(ConsoleCommandError("找不到 VM"))) + return + } + let session = getOrCreateSession(for: vmId) + agentTools.healthStatus(vm: vm, session: session, appState: self, agent: agent, + completion: completion) + } + + func restartAgent(vmId: String, agent: AgentKind, + completion: @escaping (Result) -> Void) { + guard let vm = vms.first(where: { $0.id == vmId }) else { + completion(.failure(ConsoleCommandError("找不到 VM"))) + return + } + let session = getOrCreateSession(for: vmId) + agentTools.restartAgent(vm: vm, session: session, appState: self, agent: agent, + keepCount: agentBackupSchedule(vmId: vmId, agent: agent).keepCount, + completion: completion) + } + + func testAgentModel(vmId: String, agent: AgentKind, + completion: @escaping (Result) -> Void) { + guard let vm = vms.first(where: { $0.id == vmId }) else { + completion(.failure(ConsoleCommandError("找不到 VM"))) + return + } + let session = 
getOrCreateSession(for: vmId) + agentTools.testModel(vm: vm, session: session, appState: self, agent: agent, + completion: completion) + } + + func resetAgentConfig(vmId: String, agent: AgentKind, + completion: @escaping (Result) -> Void) { + guard let vm = vms.first(where: { $0.id == vmId }) else { + completion(.failure(ConsoleCommandError("找不到 VM"))) + return + } + let session = getOrCreateSession(for: vmId) + agentTools.resetAgentConfig(vm: vm, session: session, appState: self, agent: agent, + keepCount: agentBackupSchedule(vmId: vmId, agent: agent).keepCount, + completion: completion) + } + + func exportAgentDiagnostics(vmId: String, agent: AgentKind, + completion: @escaping (Result) -> Void) { + guard let vm = vms.first(where: { $0.id == vmId }) else { + completion(.failure(ConsoleCommandError("找不到 VM"))) + return + } + let session = getOrCreateSession(for: vmId) + agentTools.exportDiagnostics(vm: vm, session: session, appState: self, agent: agent, + completion: completion) + } + + func agentBackupSchedule(vmId: String, agent: AgentKind) -> AgentBackupSchedule { + agentBackupSchedules[Self.agentBackupScheduleKey(vmId: vmId, agent: agent)] ?? 
AgentBackupSchedule() + } + + func setAgentBackupSchedule(_ schedule: AgentBackupSchedule, vmId: String, agent: AgentKind) { + let previous = agentBackupSchedule(vmId: vmId, agent: agent) + var normalized = AgentBackupSchedule( + enabled: schedule.enabled, + hour: schedule.hour, + minute: schedule.minute, + keepCount: schedule.keepCount, + lastRunDate: schedule.lastRunDate, + lastAttemptAt: schedule.lastAttemptAt, + lastAttemptStatus: schedule.lastAttemptStatus, + lastAttemptMessage: schedule.lastAttemptMessage + ) + let now = Date() + let calendar = Calendar.current + let nowMinutes = calendar.component(.hour, from: now) * 60 + calendar.component(.minute, from: now) + let scheduledMinutes = normalized.hour * 60 + normalized.minute + if !previous.enabled && normalized.enabled && nowMinutes >= scheduledMinutes { + normalized.lastRunDate = Self.agentBackupDateKey(now) + } + agentBackupSchedules[Self.agentBackupScheduleKey(vmId: vmId, agent: agent)] = normalized + saveAgentBackupSchedules() + agentTools.rotateBackups(vmId: vmId, agent: agent, keep: normalized.keepCount) + } + + private static func agentBackupScheduleKey(vmId: String, agent: AgentKind) -> String { + "\(vmId)|\(agent.rawValue)" + } + + private func loadAgentBackupSchedules() { + guard let agentBackups = readSettingsJSON()["agent_backups"] as? [String: Any], + let schedules = agentBackups["schedules"] as? [String: Any] else { + agentBackupSchedules = [:] + return + } + var loaded: [String: AgentBackupSchedule] = [:] + for (key, value) in schedules { + guard let dict = value as? [String: Any] else { continue } + loaded[key] = AgentBackupSchedule( + enabled: dict["enabled"] as? Bool ?? false, + hour: dict["hour"] as? Int ?? AgentBackupSchedule.defaultHour, + minute: dict["minute"] as? Int ?? AgentBackupSchedule.defaultMinute, + keepCount: dict["keep_count"] as? Int ?? AgentBackupSchedule.defaultKeepCount, + lastRunDate: dict["last_run_date"] as? String, + lastAttemptAt: dict["last_attempt_at"] as? 
String, + lastAttemptStatus: dict["last_attempt_status"] as? String, + lastAttemptMessage: dict["last_attempt_message"] as? String + ) + } + agentBackupSchedules = loaded + } + + private func saveAgentBackupSchedules() { + var json = readSettingsJSON() + let schedules: [String: [String: Any]] = agentBackupSchedules.mapValues { schedule in + var value: [String: Any] = [ + "enabled": schedule.enabled, + "hour": schedule.hour, + "minute": schedule.minute, + "keep_count": schedule.keepCount, + ] + if let lastRunDate = schedule.lastRunDate { + value["last_run_date"] = lastRunDate + } + if let lastAttemptAt = schedule.lastAttemptAt { + value["last_attempt_at"] = lastAttemptAt + } + if let lastAttemptStatus = schedule.lastAttemptStatus { + value["last_attempt_status"] = lastAttemptStatus + } + if let lastAttemptMessage = schedule.lastAttemptMessage { + value["last_attempt_message"] = lastAttemptMessage + } + return value + } + json["agent_backups"] = ["schedules": schedules] as [String: Any] + writeSettingsJSON(json) + } + + private func startAgentBackupScheduler() { + agentBackupTimer?.invalidate() + agentBackupTimer = Timer.scheduledTimer(withTimeInterval: 60, repeats: true) { [weak self] _ in + self?.runDueAgentBackups() + } + runDueAgentBackups() + } + + private func runDueAgentBackups(now: Date = Date()) { + let calendar = Calendar.current + let today = Self.agentBackupDateKey(now) + let nowMinutes = calendar.component(.hour, from: now) * 60 + calendar.component(.minute, from: now) + + for (key, schedule) in agentBackupSchedules { + guard schedule.enabled, schedule.lastRunDate != today else { continue } + let scheduledMinutes = schedule.hour * 60 + schedule.minute + guard nowMinutes >= scheduledMinutes else { continue } + guard !scheduledBackupsRunning.contains(key) else { continue } + + let parts = key.split(separator: "|", maxSplits: 1).map(String.init) + guard parts.count == 2, + let agent = AgentKind(rawValue: parts[1]) else { + continue + } + guard let vm = 
vms.first(where: { $0.id == parts[0] }) else { continue } + guard vm.state == .running else { + updateAgentBackupAttempt(key: key, base: schedule, status: "failed", message: AgentText("VM is not running", "VM 未运行"), at: now) + continue + } + + let session = getOrCreateSession(for: vm.id) + if !session.connected || !session.ipcClient.isConnected { + session.connectIfNeeded() + updateAgentBackupAttempt(key: key, base: schedule, status: "failed", message: AgentText("Execution channel is not connected", "执行通道未连接"), at: now) + continue + } + guard session.guestAgentConnected else { + updateAgentBackupAttempt(key: key, base: schedule, status: "failed", message: AgentText("Execution channel is not connected", "执行通道未连接"), at: now) + continue + } + + scheduledBackupsRunning.insert(key) + agentTools.snapshotBackup(vm: vm, session: session, appState: self, agent: agent, keepCount: schedule.keepCount) { [weak self] result in + DispatchQueue.main.async { + guard let self = self else { return } + self.scheduledBackupsRunning.remove(key) + switch result { + case .success: + self.updateAgentBackupAttempt(key: key, base: schedule, status: "success", message: AgentText("Succeeded", "成功"), at: now, lastRunDate: today) + NSLog("[AgentBackup] Scheduled backup completed: %@ %@", vm.id, agent.rawValue) + case .failure(let error): + self.updateAgentBackupAttempt(key: key, base: schedule, status: "failed", message: error.localizedDescription, at: now, lastRunDate: today) + NSLog("[AgentBackup] Scheduled backup failed: %@ %@ %@", vm.id, agent.rawValue, error.localizedDescription) + } + } + } + } + } + + private func updateAgentBackupAttempt(key: String, + base: AgentBackupSchedule, + status: String, + message: String, + at date: Date, + lastRunDate: String? = nil) { + var updated = agentBackupSchedules[key] ?? 
base + updated.lastAttemptAt = Self.agentBackupAttemptTimeText(date) + updated.lastAttemptStatus = status + updated.lastAttemptMessage = message + if let lastRunDate { + updated.lastRunDate = lastRunDate + } + agentBackupSchedules[key] = updated + saveAgentBackupSchedules() + } + + private static func agentBackupDateKey(_ date: Date) -> String { + let formatter = DateFormatter() + formatter.calendar = Calendar(identifier: .gregorian) + formatter.locale = Locale(identifier: "en_US_POSIX") + formatter.dateFormat = "yyyy-MM-dd" + return formatter.string(from: date) + } + + private static func agentBackupAttemptTimeText(_ date: Date) -> String { + let formatter = DateFormatter() + formatter.calendar = Calendar(identifier: .gregorian) + formatter.locale = Locale(identifier: "en_US_POSIX") + formatter.dateFormat = "MM-dd HH:mm" + return formatter.string(from: date) + } + // MARK: - LLM Proxy settings private var settingsPath: String { @@ -462,10 +834,22 @@ class AppState: ObservableObject { return dir + "/settings.json" } - func loadLlmMappings() { + private func readSettingsJSON() -> [String: Any] { guard let data = FileManager.default.contents(atPath: settingsPath), - let json = try? JSONSerialization.jsonObject(with: data) as? [String: Any], - let llmProxy = json["llm_proxy"] as? [String: Any], + let json = try? JSONSerialization.jsonObject(with: data) as? [String: Any] else { + return [:] + } + return json + } + + private func writeSettingsJSON(_ json: [String: Any]) { + if let data = try? JSONSerialization.data(withJSONObject: json, options: .prettyPrinted) { + try? data.write(to: URL(fileURLWithPath: settingsPath)) + } + } + + func loadLlmMappings() { + guard let llmProxy = readSettingsJSON()["llm_proxy"] as? [String: Any], let mappingsArray = llmProxy["mappings"] as? 
[[String: Any]] else { llmMappings = [] return @@ -484,11 +868,7 @@ class AppState: ObservableObject { } private func saveLlmMappings() { - var json: [String: Any] = [:] - if let data = FileManager.default.contents(atPath: settingsPath), - let existing = try? JSONSerialization.jsonObject(with: data) as? [String: Any] { - json = existing - } + var json = readSettingsJSON() let mappingsArray: [[String: Any]] = llmMappings.map { m in [ "alias": m.alias, @@ -502,9 +882,7 @@ class AppState: ObservableObject { "mappings": mappingsArray, "enable_logging": llmLoggingEnabled, ] as [String: Any] - if let data = try? JSONSerialization.data(withJSONObject: json, options: .prettyPrinted) { - try? data.write(to: URL(fileURLWithPath: settingsPath)) - } + writeSettingsJSON(json) } func addLlmMapping(_ mapping: LlmModelMapping) { @@ -558,7 +936,8 @@ class AppState: ObservableObject { private func sendSharedFoldersUpdateIfRunning(vmId: String) { guard let session = activeSessions[vmId], session.ipcClient.isConnected, let vm = vms.first(where: { $0.id == vmId }) else { return } - let entries = vm.sharedFolders.map { f in + let folders = vm.sharedFolders + (runtimeSharedFolders[vmId] ?? []) + let entries = folders.map { f in "\(f.tag)|\(f.hostPath)|\(f.readonly ? 
"1" : "0")" } session.ipcClient.sendSharedFoldersUpdate(entries: entries) @@ -676,5 +1055,10 @@ private struct VmCommandMenuContent: View { appState.showPortForwardsSheet = true } .disabled(vm == nil) + + Button(AgentText("Agent Toolbox...", "Agent急救箱...")) { + appState.showAgentToolsSheet = true + } + .disabled(vm == nil || !isRunning) } } diff --git a/src/manager-macos/Views/AgentToolsView.swift b/src/manager-macos/Views/AgentToolsView.swift new file mode 100644 index 0000000..922052f --- /dev/null +++ b/src/manager-macos/Views/AgentToolsView.swift @@ -0,0 +1,1479 @@ +import SwiftUI +import AppKit +import UniformTypeIdentifiers + +struct AgentToolsSheet: View { + let vmId: String + @ObservedObject private var session: VmSession + @EnvironmentObject var appState: AppState + @Environment(\.dismiss) private var dismiss + + @State private var selectedAgent: AgentKind = .hermes + @State private var runningOperation: AgentToolOperation? + @State private var operationResult: AgentOperationDisplay? + @State private var pendingConfirmation: PendingAgentConfirmation? + @State private var latestBackupText = AgentText("Loading...", "正在读取...") + @State private var latestBackupPath: String? + @State private var backupSchedule = AgentBackupSchedule() + @State private var backupPackages: [AgentBackupPackage] = [] + @State private var selectedBackupId: String? + @State private var showsAdvancedActions = false + @State private var showsAllBackups = false + @State private var selectedOpenClawSourceVmId: String? + @State private var migrationSkillConflictStrategy: AgentSkillConflictStrategy = .skip + @State private var migrationWorkspaceTarget = AgentMigrationOptions().workspaceTarget + @State private var migrationProgress: [AgentMigrationProgress] = [] + + init(vmId: String, session: VmSession) { + self.vmId = vmId + self.session = session + } + + private var vm: VmInfo? 
{
        appState.vms.first(where: { $0.id == vmId })
    }

    // Actions are enabled only when the VM is running, guest-exec is reachable,
    // and no other operation is in flight.
    private var canRun: Bool {
        vm?.state == .running && session.guestAgentConnected && runningOperation == nil
    }

    // Bridges the optional pendingConfirmation into the Bool the .alert expects.
    // NOTE: the generic parameter was lost to markup-stripping; restored here.
    private var confirmationPresented: Binding<Bool> {
        Binding(
            get: { pendingConfirmation != nil },
            set: { if !$0 { pendingConfirmation = nil } }
        )
    }

    var body: some View {
        VStack(spacing: 0) {
            header

            Divider()

            ScrollView {
                VStack(alignment: .leading, spacing: 16) {
                    statusPanel

                    Picker("Agent", selection: $selectedAgent) {
                        ForEach(AgentKind.allCases) { agent in
                            Text(agent.displayName).tag(agent)
                        }
                    }
                    .pickerStyle(.segmented)

                    triagePanel

                    if let runningOperation {
                        HStack(spacing: 8) {
                            ProgressView()
                                .controlSize(.small)
                            Text(runningOperation.runningText(agent: selectedAgent))
                                .foregroundStyle(.secondary)
                        }
                    }

                    if runningOperation == .migrateOpenClaw || !migrationProgress.isEmpty {
                        MigrationProgressView(items: migrationProgress)
                    }

                    if let operationResult {
                        AgentOperationResultView(result: operationResult)
                        if let report = operationResult.healthReport, report.state != "ok" {
                            repairSuggestionPanel(report: report)
                        }
                    }

                    advancedActionsPanel

                    schedulePanel

                    backupPickerPanel
                }
                .padding()
            }
        }
        .frame(width: 640, height: 600)
        .onAppear {
            loadSchedule()
            refreshBackupList()
            refreshBackupSummary()
            refreshMigrationSourceSelection()
        }
        .onChange(of: selectedAgent, perform: { _ in
            // Clear per-agent state before loading the newly selected agent.
            operationResult = nil
            migrationProgress = []
            selectedBackupId = nil
            showsAllBackups = false
            loadSchedule()
            refreshBackupList()
            refreshBackupSummary()
            refreshMigrationSourceSelection()
        })
        .alert(pendingConfirmation?.title ??
"", isPresented: confirmationPresented) { + Button("取消", role: .cancel) { + pendingConfirmation = nil + } + if let pendingConfirmation { + Button(pendingConfirmation.confirmTitle, role: .destructive) { + confirmPendingAction(pendingConfirmation) + } + } + } message: { + Text(pendingConfirmation?.message ?? "") + } + } + + private var header: some View { + HStack(spacing: 12) { + Text(AgentText("Agent Toolbox", "Agent急救箱")) + .font(.title3) + .fontWeight(.semibold) + + Spacer() + + Button(AgentText("Done", "完成")) { dismiss() } + .keyboardShortcut(.cancelAction) + } + .padding() + } + + private var statusPanel: some View { + VStack(alignment: .leading, spacing: 10) { + HStack(spacing: 8) { + StatusPill( + title: "虚拟机", + value: vmStateText, + systemImage: "desktopcomputer", + tone: vm?.state == .running ? .ok : .muted + ) + StatusPill( + title: "执行通道", + value: session.guestAgentConnected ? AgentText("Connected", "已连接") : AgentText("Disconnected", "未连接"), + systemImage: "checkmark.seal", + tone: session.guestAgentConnected ? .ok : .warning + ) + StatusPill( + title: AgentText("Latest backup", "最近备份"), + value: latestBackupText, + systemImage: "clock.arrow.circlepath", + tone: latestBackupPath == nil ? 
.muted : .ok + ) + } + + if vm?.state != .running { + Text("请先启动 VM,再使用 Agent 数据工具。") + .font(.caption) + .foregroundStyle(.secondary) + } else if !session.guestAgentConnected { + Text(AgentText("Import, backup, and health checks require the execution channel to be connected.", "执行通道连接后才能执行导入、备份和健康检查。")) + .font(.caption) + .foregroundStyle(.secondary) + } + } + .padding(12) + .background(.quaternary.opacity(0.7)) + .clipShape(RoundedRectangle(cornerRadius: 8)) + } + + private var triagePanel: some View { + VStack(alignment: .leading, spacing: 10) { + Text(AgentText("First aid", "急救")) + .font(.headline) + + Button { + checkHealth() + } label: { + Label(AgentText("Run diagnosis", "一键诊断"), systemImage: "stethoscope") + .frame(maxWidth: .infinity, alignment: .center) + } + .controlSize(.large) + .disabled(!canRun) + .help(AgentText("Check Agent service, model proxy, browser, and disk status", "检查 Agent 服务、模型代理、浏览器和磁盘状态")) + + Button { + snapshotBackup() + } label: { + Label(AgentText("Back Up Now", "立即备份"), systemImage: "clock.arrow.circlepath") + .frame(maxWidth: .infinity, alignment: .center) + } + .disabled(!canRun) + + Text(AgentText("Start with diagnosis. 
Import, reset config, or export diagnostics only when the result points there.", "建议先点“一键诊断”。只有需要迁移或人工处理时,再导入、重置配置或导出诊断包。")) + .font(.caption) + .foregroundStyle(.secondary) + } + .padding(12) + .background(.quaternary.opacity(0.45)) + .clipShape(RoundedRectangle(cornerRadius: 8)) + } + + private var schedulePanel: some View { + VStack(alignment: .leading, spacing: 10) { + HStack { + Text(AgentText("Scheduled backup", "定时备份")) + .font(.headline) + Spacer() + Toggle("启用", isOn: scheduleEnabledBinding) + .toggleStyle(.checkbox) + } + + HStack(spacing: 12) { + Picker("时间", selection: scheduleHourBinding) { + ForEach(0..<24, id: \.self) { hour in + Text(String(format: "%02d", hour)).tag(hour) + } + } + .frame(width: 112) + + Picker("分钟", selection: scheduleMinuteBinding) { + ForEach(0..<60, id: \.self) { minute in + Text(String(format: "%02d", minute)).tag(minute) + } + } + .frame(width: 112) + + Stepper(value: scheduleKeepCountBinding, in: 1...99) { + Text(AgentText("Keep up to \(backupSchedule.keepCount)", "最多保留 \(backupSchedule.keepCount) 条")) + } + + Spacer() + } + + Text(scheduleDescription) + .font(.caption) + .foregroundStyle(.secondary) + + if let scheduleStatusText { + Text(scheduleStatusText) + .font(.caption) + .foregroundStyle(backupSchedule.lastAttemptStatus == "failed" ? Color.orange : Color.secondary) + } + } + .padding(12) + .background(.quaternary.opacity(0.45)) + .clipShape(RoundedRectangle(cornerRadius: 8)) + } + + private var scheduleDescription: String { + guard backupSchedule.enabled else { + return AgentText("Scheduled backup is disabled. Default time is 03:00; it only runs when the VM is running and the execution channel is connected.", "定时备份未启用。默认时间为 03:00,只有 VM 运行且执行通道已连接时才会执行。") + } + return AgentText("Backs up daily at \(backupSchedule.timeText); \(nextBackupText).", "每天 \(backupSchedule.timeText) 自动备份;\(nextBackupText)。") + } + + private var scheduleStatusText: String? 
{ + guard let at = backupSchedule.lastAttemptAt, + let status = backupSchedule.lastAttemptStatus else { + return nil + } + if status == "success" { + return AgentText("Last automatic backup: \(at) succeeded", "上次自动备份:\(at) 成功") + } + let message = backupSchedule.lastAttemptMessage ?? AgentText("Failed", "失败") + return AgentText("Last automatic backup failed: \(message) (\(at))", "上次自动备份失败:\(message)(\(at))") + } + + private var nextBackupText: String { + let calendar = Calendar.current + let now = Date() + var components = calendar.dateComponents([.year, .month, .day], from: now) + components.hour = backupSchedule.hour + components.minute = backupSchedule.minute + components.second = 0 + var next = calendar.date(from: components) ?? now + if next <= now { + next = calendar.date(byAdding: .day, value: 1, to: next) ?? next + } + let formatter = DateFormatter() + formatter.dateFormat = calendar.isDateInToday(next) ? "今天 HH:mm" : "明天 HH:mm" + return "下次预计 \(formatter.string(from: next))" + } + + private var backupPickerPanel: some View { + VStack(alignment: .leading, spacing: 10) { + HStack { + Text(AgentText("Backups", "备份列表")) + .font(.headline) + Spacer() + Button { + refreshBackupList() + refreshBackupSummary() + } label: { + Label("刷新", systemImage: "arrow.clockwise") + } + .buttonStyle(.borderless) + } + + if backupPackages.isEmpty { + Text(AgentText("No backups yet.", "还没有备份。")) + .font(.caption) + .foregroundStyle(.secondary) + .frame(maxWidth: .infinity, alignment: .leading) + .padding(.vertical, 4) + } else { + VStack(spacing: 6) { + ForEach(displayedBackupPackages) { package in + BackupPackageRow( + package: package, + isSelected: selectedBackupId == package.id, + reveal: { revealBackup(package) } + ) { + selectedBackupId = package.id + } + } + } + + HStack { + Button { + guard let package = selectedBackupPackage else { return } + pendingConfirmation = .restoreBackup(package) + } label: { + Label(AgentText("Restore Selected Backup", "恢复选中备份"), systemImage: 
"arrow.uturn.backward") + } + .disabled(!canRun || selectedBackupPackage == nil) + + if backupPackages.count > 3 { + Button(showsAllBackups ? "收起" : "显示全部 \(backupPackages.count) 条") { + showsAllBackups.toggle() + } + .buttonStyle(.link) + } + + Spacer() + } + } + } + .padding(12) + .background(.quaternary.opacity(0.45)) + .clipShape(RoundedRectangle(cornerRadius: 8)) + } + + private var selectedBackupPackage: AgentBackupPackage? { + guard let selectedBackupId else { return nil } + return backupPackages.first { $0.id == selectedBackupId } + } + + private var displayedBackupPackages: [AgentBackupPackage] { + if showsAllBackups { + return backupPackages + } + return Array(backupPackages.prefix(3)) + } + + private var openClawMigrationCandidates: [VmInfo] { + appState.vms + .filter { $0.id != vmId && $0.state == .running } + .sorted { $0.name.localizedCaseInsensitiveCompare($1.name) == .orderedAscending } + } + + private var selectedOpenClawSourceVm: VmInfo? { + guard let selectedOpenClawSourceVmId else { return nil } + return openClawMigrationCandidates.first { $0.id == selectedOpenClawSourceVmId } + } + + private var canMigrateOpenClaw: Bool { + selectedAgent == .hermes && canRun && selectedOpenClawSourceVm != nil + } + + private var advancedActionsPanel: some View { + DisclosureGroup(isExpanded: $showsAdvancedActions) { + VStack(alignment: .leading, spacing: 10) { + if selectedAgent == .hermes { + openClawMigrationPanel + Divider() + } + operationSection( + title: "高级操作", + operations: [.exportProfile, .importProfile, .restartAgent, .resetConfig, .diagnostics] + ) + Text(AgentText("These actions change config, overwrite data, or generate diagnostics. 
Use them after diagnosis points to a next step.", "这些操作会改动配置、覆盖数据或生成排障包,建议在诊断结果提示后再使用。")) + .font(.caption) + .foregroundStyle(.secondary) + } + .padding(.top, 8) + } label: { + Text("高级操作") + .font(.headline) + } + .padding(12) + .background(.quaternary.opacity(0.45)) + .clipShape(RoundedRectangle(cornerRadius: 8)) + } + + private var openClawMigrationPanel: some View { + VStack(alignment: .leading, spacing: 8) { + Text(AgentText("Migrate from OpenClaw", "从 OpenClaw 迁移")) + .font(.headline) + + HStack(spacing: 10) { + Picker("来源 VM", selection: $selectedOpenClawSourceVmId) { + Text("选择运行中的 OpenClaw VM").tag(String?.none) + ForEach(openClawMigrationCandidates) { vm in + Text(vm.name).tag(Optional(vm.id)) + } + } + .frame(maxWidth: .infinity) + + Button { + guard let sourceVm = selectedOpenClawSourceVm else { return } + pendingConfirmation = .migrateOpenClaw(sourceVm.id) + } label: { + Label(AgentText("Auto Migrate", "自动迁移"), systemImage: "arrow.triangle.2.circlepath") + } + .disabled(!canMigrateOpenClaw) + } + + HStack(spacing: 10) { + Picker(AgentText("Skill conflicts", "技能冲突"), selection: $migrationSkillConflictStrategy) { + ForEach(AgentSkillConflictStrategy.allCases) { strategy in + Text(strategy.displayName).tag(strategy) + } + } + .help(migrationSkillConflictStrategy.help) + + TextField("Workspace 目标", text: $migrationWorkspaceTarget) + .textFieldStyle(.roundedBorder) + .help(AgentText("Destination for OpenClaw workspace instructions in Hermes. Leave empty to use the Hermes default.", "OpenClaw workspace 指令迁移到 Hermes 的目标目录;留空则交给 hermes 默认处理")) + } + + Text(AgentText("This backs up target Hermes, exports full OpenClaw user data, runs the official dry-run, and saves the report on the host. 
Both VMs must be running with Guest Agent connected.", "会先备份目标 Hermes,导出完整 OpenClaw 用户数据,执行官方 dry-run 并把迁移报告保存到宿主机。两个 VM 都需要运行且 Guest Agent 已连接。"))
                .font(.caption)
                .foregroundStyle(.secondary)
        }
    }

    // Shown under a failed health report: surfaces only the repair actions that
    // match the failed checks, plus a diagnostics export as a catch-all.
    private func repairSuggestionPanel(report: HealthReport) -> some View {
        VStack(alignment: .leading, spacing: 10) {
            Text(AgentText("Suggested fixes", "建议修复"))
                .font(.headline)

            LazyVGrid(columns: [
                GridItem(.flexible(), spacing: 10),
                GridItem(.flexible(), spacing: 10)
            ], spacing: 10) {
                if report.isError("agent_service") || report.isError("gateway_port") {
                    Button {
                        restartAgent()
                    } label: {
                        Label(AgentText("Restart Service", "重启服务"), systemImage: "arrow.clockwise")
                            .frame(maxWidth: .infinity)
                    }
                    .disabled(!canRun)
                }

                if report.isError("llm_proxy") {
                    Button {
                        openLlmProxySettings()
                    } label: {
                        Label(AgentText("Check LLM Proxy", "检查 LLM Proxy"), systemImage: "key.viewfinder")
                            .frame(maxWidth: .infinity)
                    }
                    .disabled(runningOperation != nil)

                    Button {
                        pendingConfirmation = .resetConfig
                    } label: {
                        Label(AgentText("Reset Model Config", "重置模型配置"), systemImage: "slider.horizontal.2.square")
                            .frame(maxWidth: .infinity)
                    }
                    .disabled(!canRun)
                }

                Button {
                    exportDiagnostics()
                } label: {
                    Label(AgentText("Export Diagnostics", "导出诊断包"), systemImage: "doc.zipper")
                        .frame(maxWidth: .infinity)
                }
                .disabled(!canRun)
            }
        }
        .padding(12)
        .background(Color.orange.opacity(0.08))
        .clipShape(RoundedRectangle(cornerRadius: 8))
    }

    // The bindings below persist immediately on change via saveSchedule().
    // Generic parameters were lost to markup-stripping; restored here.
    private var scheduleEnabledBinding: Binding<Bool> {
        Binding(
            get: { backupSchedule.enabled },
            set: { backupSchedule.enabled = $0; saveSchedule() }
        )
    }

    private var scheduleHourBinding: Binding<Int> {
        Binding(
            get: { backupSchedule.hour },
            set: { backupSchedule.hour = $0; saveSchedule() }
        )
    }

    private var scheduleMinuteBinding: Binding<Int> {
        Binding(
            get: { backupSchedule.minute },
            set: { backupSchedule.minute = $0; saveSchedule() }
        )
    }

    private var scheduleKeepCountBinding: Binding<Int> {
        Binding(
            get: {
backupSchedule.keepCount },
            set: {
                backupSchedule.keepCount = $0
                saveSchedule()
                refreshBackupList()
                refreshBackupSummary()
            }
        )
    }

    // Localized VM state label. All cases go through AgentText so the UI
    // defaults to English (previously only .stopped was wrapped).
    private var vmStateText: String {
        switch vm?.state {
        case .running: return AgentText("Running", "运行中")
        case .starting: return AgentText("Starting", "启动中")
        case .rebooting: return AgentText("Rebooting", "重启中")
        case .crashed: return AgentText("Crashed", "异常退出")
        case .stopped: return AgentText("Stopped", "已停止")
        case .none: return AgentText("Unknown", "未知")
        }
    }

    // Renders a titled 3-column grid of operation buttons.
    private func operationSection(title: String, operations: [AgentToolOperation]) -> some View {
        VStack(alignment: .leading, spacing: 8) {
            Text(title)
                .font(.headline)

            LazyVGrid(columns: [
                GridItem(.flexible(), spacing: 10),
                GridItem(.flexible(), spacing: 10),
                GridItem(.flexible(), spacing: 10)
            ], spacing: 10) {
                ForEach(operations) { operation in
                    Button {
                        run(operation)
                    } label: {
                        Label(operation.title, systemImage: operation.systemImage)
                            .frame(maxWidth: .infinity, alignment: .center)
                    }
                    .disabled(!canRun)
                    .help(operation.help)
                }
            }
        }
    }

    // Dispatches a toolbox operation; destructive ones route through
    // pendingConfirmation instead of running directly.
    private func run(_ operation: AgentToolOperation) {
        switch operation {
        case .exportProfile:
            exportProfile()
        case .importProfile:
            importProfile()
        case .migrateOpenClaw:
            migrateOpenClawToHermes()
        case .snapshotBackup:
            snapshotBackup()
        case .restoreBackup:
            if let package = selectedBackupPackage {
                pendingConfirmation = .restoreBackup(package)
            }
        case .healthCheck:
            checkHealth()
        case .restartAgent:
            restartAgent()
        case .resetConfig:
            pendingConfirmation = .resetConfig
        case .diagnostics:
            exportDiagnostics()
        }
    }

    private func exportProfile() {
        guard let vm = vm else { return }
        let panel = NSSavePanel()
        panel.title = AgentText("Export Agent Data", "导出 Agent 数据")
        panel.nameFieldStringValue = "\(vm.name)-\(selectedAgent.rawValue)-profile.tar.gz"
        applyGzipTypeLimit(to: panel)
        presentPanel(panel) { response in
            guard response == .OK, let url = panel.url else { return }
            let destinationURL = Self.normalizedPackageURL(url)
runOperation(.exportProfile, revealPath: destinationURL.path) { + appState.exportAgentProfile(vmId: vm.id, agent: selectedAgent, destinationURL: destinationURL, completion: $0) + } + } + } + + private func importProfile() { + let panel = NSOpenPanel() + panel.title = AgentText("Import Agent Data", "导入 Agent 数据") + panel.canChooseFiles = true + panel.canChooseDirectories = false + panel.allowsMultipleSelection = false + applyGzipTypeLimit(to: panel) + presentPanel(panel) { response in + guard response == .OK, let url = panel.url else { return } + guard Self.isAgentPackageURL(url) else { + operationResult = AgentOperationDisplay( + isSuccess: false, + title: AgentText("Import Failed", "导入失败"), + summary: "请选择 .tar.gz 或 .tgz 文件", + details: url.path, + revealPath: nil, + healthReport: nil + ) + return + } + pendingConfirmation = .importProfile(url) + } + } + + private func presentPanel(_ panel: NSSavePanel, completion: @escaping (NSApplication.ModalResponse) -> Void) { + if let window = NSApplication.shared.keyWindow { + panel.beginSheetModal(for: window, completionHandler: completion) + } else { + panel.begin(completionHandler: completion) + } + } + + private func applyGzipTypeLimit(to panel: NSSavePanel) { + if let gzipType = UTType(filenameExtension: "gz") { + panel.allowedContentTypes = [gzipType] + } + } + + private func confirmPendingAction(_ pending: PendingAgentConfirmation) { + pendingConfirmation = nil + switch pending { + case .importProfile(let url): + guard let vm = vm else { return } + runOperation(.importProfile) { + appState.importAgentProfile(vmId: vm.id, agent: selectedAgent, sourceURL: url, completion: $0) + } + case .migrateOpenClaw(let sourceVmId): + migrateOpenClawToHermes(sourceVmId: sourceVmId) + case .restoreBackup(let package): + restoreBackup(package) + case .resetConfig: + resetConfig() + } + } + + private func migrateOpenClawToHermes(sourceVmId: String? = nil) { + guard let vm = vm else { return } + let resolvedSourceVmId = sourceVmId ?? 
selectedOpenClawSourceVm?.id + guard let resolvedSourceVmId else { return } + let workspaceTarget = migrationWorkspaceTarget.trimmingCharacters(in: .whitespacesAndNewlines) + guard workspaceTarget.isEmpty || workspaceTarget.hasPrefix("/") else { + operationResult = AgentOperationDisplay( + isSuccess: false, + title: AgentText("Migration Failed", "迁移失败"), + summary: "Workspace 目标必须是绝对路径", + details: workspaceTarget, + revealPath: nil, + healthReport: nil + ) + return + } + let options = AgentMigrationOptions( + skillConflictStrategy: migrationSkillConflictStrategy, + workspaceTarget: workspaceTarget + ) + migrationProgress = [] + runOperation(.migrateOpenClaw) { + appState.migrateOpenClawToHermes( + sourceVmId: resolvedSourceVmId, + targetVmId: vm.id, + options: options, + progress: { item in + DispatchQueue.main.async { + migrationProgress.append(item) + } + }, + completion: $0 + ) + } + } + + private func snapshotBackup() { + guard let vm = vm else { return } + runOperation(.snapshotBackup) { + appState.snapshotAgentBackup(vmId: vm.id, agent: selectedAgent, completion: $0) + } + } + + private func restoreBackup(_ package: AgentBackupPackage) { + guard let vm = vm else { return } + runOperation(.restoreBackup) { + appState.restoreAgentBackup(vmId: vm.id, agent: selectedAgent, packageURL: package.url, completion: $0) + } + } + + private func checkHealth() { + guard let vm = vm else { return } + runOperation(.healthCheck) { + appState.agentHealthStatus(vmId: vm.id, agent: selectedAgent, completion: $0) + } + } + + private func restartAgent() { + guard let vm = vm else { return } + runOperation(.restartAgent) { + appState.restartAgent(vmId: vm.id, agent: selectedAgent, completion: $0) + } + } + + private func openLlmProxySettings() { + dismiss() + DispatchQueue.main.asyncAfter(deadline: .now() + 0.2) { + appState.showLlmProxySheet = true + } + } + + private func resetConfig() { + guard let vm = vm else { return } + runOperation(.resetConfig) { + 
appState.resetAgentConfig(vmId: vm.id, agent: selectedAgent, completion: $0)
        }
    }

    private func exportDiagnostics() {
        guard let vm = vm else { return }
        runOperation(.diagnostics) {
            appState.exportAgentDiagnostics(vmId: vm.id, agent: selectedAgent, completion: $0)
        }
    }

    // Refreshes the "latest backup" status pill. Bare Chinese literals are
    // wrapped in AgentText so the UI defaults to English, matching the
    // convention used by the sibling strings in this method.
    private func refreshBackupSummary() {
        guard let vm = vm else {
            latestBackupText = AgentText("Unknown", "未知")
            latestBackupPath = nil
            return
        }
        appState.agentBackupStatus(vmId: vm.id, agent: selectedAgent) { result in
            DispatchQueue.main.async {
                switch result {
                case .success(let status):
                    latestBackupPath = Self.extractBackupPath(from: status.output)
                    if let latestBackupPath {
                        latestBackupText = Self.compactBackupText(path: latestBackupPath)
                    } else {
                        latestBackupText = AgentText("None yet", "暂无")
                    }
                case .failure:
                    latestBackupText = AgentText("Failed to load", "读取失败")
                    latestBackupPath = nil
                }
            }
        }
    }

    private func refreshBackupList() {
        guard let vm = vm else {
            backupPackages = []
            selectedBackupId = nil
            return
        }
        backupPackages = appState.listAgentBackups(vmId: vm.id, agent: selectedAgent)
        // Keep the current selection when it still exists; otherwise pick newest.
        if let selectedBackupId, backupPackages.contains(where: { $0.id == selectedBackupId }) {
            return
        }
        selectedBackupId = backupPackages.first?.id
    }

    private func refreshMigrationSourceSelection() {
        let candidates = openClawMigrationCandidates
        if let selectedOpenClawSourceVmId,
           candidates.contains(where: { $0.id == selectedOpenClawSourceVmId }) {
            return
        }
        selectedOpenClawSourceVmId = candidates.first?.id
    }

    private func revealBackup(_ package: AgentBackupPackage) {
        NSWorkspace.shared.activateFileViewerSelecting([package.url])
    }

    private func loadSchedule() {
        backupSchedule = appState.agentBackupSchedule(vmId: vmId, agent: selectedAgent)
    }

    private func saveSchedule() {
        appState.setAgentBackupSchedule(backupSchedule, vmId: vmId, agent: selectedAgent)
    }

    private func runOperation(_ operation: AgentToolOperation,
                              revealPath: String?
= nil,
                              _ action: (@escaping (Result<AgentToolResult, Error>) -> Void) -> Void) {
        // NOTE: the Result generic parameters were lost to markup-stripping;
        // restored from the .success(AgentToolResult) usage below.
        operationResult = nil
        if operation != .migrateOpenClaw {
            migrationProgress = []
        }
        runningOperation = operation
        action { result in
            DispatchQueue.main.async {
                runningOperation = nil
                switch result {
                case .success(let output):
                    let display = Self.makeSuccessDisplay(
                        operation: operation,
                        result: output,
                        revealPath: revealPath
                    )
                    operationResult = display
                    if operation == .healthCheck {
                        // Auto-expand advanced actions when diagnosis found problems.
                        showsAdvancedActions = display.healthReport?.state != "ok"
                    }
                    refreshBackupSummary()
                    refreshBackupList()
                case .failure(let error):
                    operationResult = Self.makeFailureDisplay(operation: operation, error: error)
                    if operation == .healthCheck {
                        showsAdvancedActions = true
                    }
                    refreshBackupSummary()
                    refreshBackupList()
                }
            }
        }
    }

    private static func makeSuccessDisplay(operation: AgentToolOperation,
                                           result: AgentToolResult,
                                           revealPath: String?) -> AgentOperationDisplay {
        let raw = result.output.trimmingCharacters(in: .whitespacesAndNewlines)
        let detectedPath = revealPath ?? operation.revealPath(from: result)
        let health = operation.showsHealth ? HealthReport.parse(from: raw) : nil
        let rawSummary = result.message.trimmingCharacters(in: .whitespacesAndNewlines)
        let summary = health?.state == "ok" ? (health?.message ?? rawSummary) : rawSummary
        let details = operation == .healthCheck && health?.state == "ok" ?
"" : raw + return AgentOperationDisplay( + isSuccess: true, + title: operation.successTitle, + summary: compactSummary(summary, fallback: AgentText("Operation completed", "操作已完成")), + details: details, + revealPath: detectedPath, + healthReport: health + ) + } + + private static func makeFailureDisplay(operation: AgentToolOperation, error: Error) -> AgentOperationDisplay { + let raw = error.localizedDescription.trimmingCharacters(in: .whitespacesAndNewlines) + return AgentOperationDisplay( + isSuccess: false, + title: operation.failureTitle, + summary: compactSummary(friendlyErrorMessage(raw), fallback: AgentText("Operation failed", "操作失败")), + details: raw, + revealPath: nil, + healthReport: nil + ) + } + + private static func extractBackupPath(from output: String) -> String? { + let prefix = AgentText("Latest backup: ", "最近备份:") + for line in output.split(whereSeparator: { $0.isNewline }) { + let text = String(line).trimmingCharacters(in: .whitespaces) + if text.hasPrefix(prefix) { + return String(text.dropFirst(prefix.count)) + } + } + return nil + } + + private static func isAgentPackageURL(_ url: URL) -> Bool { + let name = url.lastPathComponent.lowercased() + return name.hasSuffix(".tar.gz") || name.hasSuffix(".tgz") + } + + private static func normalizedPackageURL(_ url: URL) -> URL { + if isAgentPackageURL(url) { + return url + } + if url.lastPathComponent.lowercased().hasSuffix(".gz") { + return url.deletingPathExtension().appendingPathExtension("tar.gz") + } + return url.appendingPathExtension("tar.gz") + } + + private static func compactBackupText(path: String) -> String { + let url = URL(fileURLWithPath: path) + if let attrs = try? FileManager.default.attributesOfItem(atPath: path), + let date = attrs[.modificationDate] as? 
Date { + let formatter = DateFormatter() + formatter.dateFormat = "MM-dd HH:mm" + return formatter.string(from: date) + } + return url.lastPathComponent + } + + private static func friendlyErrorMessage(_ raw: String) -> String { + if raw.isEmpty { return AgentText("Operation failed", "操作失败") } + let checks: [(String, String)] = [ + ("VM not found", AgentText("VM was not found", "找不到 VM")), + ("VM runtime is not connected", AgentText("VM runtime is not connected", "VM 运行时未连接")), + ("Guest agent is not connected", AgentText("Guest Agent is not connected", "Guest Agent 未连接")), + ("Command timed out", AgentText("Operation timed out", "操作超时")), + ("Failed to send guest agent command", AgentText("Failed to send Guest Agent command", "发送 Guest Agent 命令失败")), + ("Agent data is not initialized", AgentText("Agent data is not initialized", "Agent 数据尚未初始化")), + ("OpenClaw 数据尚未初始化", AgentText("OpenClaw data is not initialized", "OpenClaw 数据尚未初始化")), + ("缺少 Hermes 命令", AgentText("Target VM is missing the Hermes command", "目标 VM 缺少 Hermes 命令")), + ("缺少 OpenClaw 命令", AgentText("VM is missing the OpenClaw command", "VM 缺少 OpenClaw 命令")), + ("No backup package found", AgentText("No restorable backup was found", "没有找到可恢复的备份")), + ("package not found", AgentText("Import package was not found", "找不到导入包")), + ("manifest.json missing", AgentText("Import package is missing manifest.json", "导入包缺少 manifest.json")), + ("files.tar.gz missing", AgentText("Import package is missing files.tar.gz", "导入包缺少 files.tar.gz")), + ("Model proxy is unavailable", AgentText("Model proxy unavailable", "模型代理不可用")), + ("Browser is unavailable", AgentText("Browser is unavailable", "浏览器不可用")), + ("Disk space is low", AgentText("Disk space is low", "磁盘空间不足")), + ("Agent service is not running", AgentText("Agent service is not running", "Agent 服务未运行")), + ("Agent gateway is unavailable", AgentText("Agent gateway is unavailable", "Agent 网关不可用")) + ] + for (needle, message) in checks where raw.contains(needle) { + 
return message + } + return raw + } + + private static func compactSummary(_ text: String, fallback: String) -> String { + let lines = text + .split(whereSeparator: { $0.isNewline }) + .map { String($0).trimmingCharacters(in: .whitespacesAndNewlines) } + .filter { !$0.isEmpty } + guard let first = lines.first else { return fallback } + let limit = 180 + guard first.count > limit else { return first } + return AgentText("\(first.prefix(limit)) ... copy details for full output", "\(first.prefix(limit)) ... 完整输出请复制详情") + } +} + +private enum AgentToolOperation: String, CaseIterable, Identifiable { + case exportProfile + case importProfile + case migrateOpenClaw + case snapshotBackup + case restoreBackup + case healthCheck + case restartAgent + case resetConfig + case diagnostics + + var id: String { rawValue } + + var title: String { + switch self { + case .exportProfile: return AgentText("Export Migration Package", "导出迁移包") + case .importProfile: return AgentText("Import", "导入") + case .migrateOpenClaw: return AgentText("OpenClaw Migration", "OpenClaw 迁移") + case .snapshotBackup: return AgentText("Back Up Now", "立即备份") + case .restoreBackup: return AgentText("Restore Backup", "恢复备份") + case .healthCheck: return AgentText("Run diagnosis", "一键诊断") + case .restartAgent: return AgentText("Restart Service", "重启服务") + case .resetConfig: return AgentText("Reset Config", "重置配置") + case .diagnostics: return AgentText("Export Diagnostics", "导出诊断") + } + } + + var systemImage: String { + switch self { + case .exportProfile: return "square.and.arrow.up" + case .importProfile: return "square.and.arrow.down" + case .migrateOpenClaw: return "arrow.triangle.2.circlepath" + case .snapshotBackup: return "clock.arrow.circlepath" + case .restoreBackup: return "arrow.uturn.backward" + case .healthCheck: return "stethoscope" + case .restartAgent: return "arrow.clockwise" + case .resetConfig: return "slider.horizontal.2.square" + case .diagnostics: return "doc.zipper" + } + } + + var help: 
String { + switch self { + case .exportProfile: return AgentText("Export current Agent data", "导出当前 Agent 数据") + case .importProfile: return AgentText("Import Agent data from an archive", "从归档包导入 Agent 数据") + case .migrateOpenClaw: return AgentText("Migrate from a running OpenClaw VM to this Hermes VM", "从运行中的 OpenClaw VM 迁移到当前 Hermes VM") + case .snapshotBackup: return AgentText("Create a host-side backup", "创建一份主机侧备份") + case .restoreBackup: return AgentText("Restore Agent data from the selected backup", "用选中的备份恢复 Agent 数据") + case .healthCheck: return AgentText("Check Agent health status", "检查 Agent 运行状态") + case .restartAgent: return AgentText("Restart the Agent service", "重启 Agent 服务") + case .resetConfig: return AgentText("Reset Agent model configuration", "重置 Agent 模型配置") + case .diagnostics: return AgentText("Export Diagnostics", "导出诊断包") + } + } + + var successTitle: String { + switch self { + case .exportProfile: return AgentText("Export Complete", "导出完成") + case .importProfile: return AgentText("Import Complete", "导入完成") + case .migrateOpenClaw: return AgentText("Migration Complete", "迁移完成") + case .snapshotBackup: return AgentText("Backup Complete", "备份完成") + case .restoreBackup: return AgentText("Restore Complete", "恢复完成") + case .healthCheck: return AgentText("Diagnosis Complete", "诊断完成") + case .restartAgent: return AgentText("Restart Complete", "重启完成") + case .resetConfig: return AgentText("Config Reset", "配置已重置") + case .diagnostics: return AgentText("Diagnostics Exported", "诊断包已导出") + } + } + + var failureTitle: String { + switch self { + case .exportProfile: return AgentText("Export Failed", "导出失败") + case .importProfile: return AgentText("Import Failed", "导入失败") + case .migrateOpenClaw: return AgentText("Migration Failed", "迁移失败") + case .snapshotBackup: return AgentText("Backup Failed", "备份失败") + case .restoreBackup: return AgentText("Restore Failed", "恢复失败") + case .healthCheck: return AgentText("Diagnosis Failed", "诊断失败") + case .restartAgent: return AgentText("Restart Failed", "重启失败") + case .resetConfig: return 
AgentText("Reset Failed", "重置失败") + case .diagnostics: return AgentText("Diagnostics Export Failed", "诊断导出失败") + } + } + + var showsHealth: Bool { + switch self { + case .healthCheck, .restartAgent, .resetConfig: return true + default: return false + } + } + + func runningText(agent: AgentKind) -> String { + switch self { + case .exportProfile: return AgentText("Exporting \(agent.displayName) data...", "正在导出 \(agent.displayName) 数据...") + case .importProfile: return AgentText("Importing \(agent.displayName) data...", "正在导入 \(agent.displayName) 数据...") + case .migrateOpenClaw: return AgentText("Migrating from OpenClaw VM to Hermes...", "正在从 OpenClaw VM 迁移到 Hermes...") + case .snapshotBackup: return AgentText("Backing up \(agent.displayName) data...", "正在备份 \(agent.displayName) 数据...") + case .restoreBackup: return AgentText("Restoring \(agent.displayName) backup...", "正在恢复 \(agent.displayName) 备份...") + case .healthCheck: return AgentText("Diagnosing \(agent.displayName) status...", "正在诊断 \(agent.displayName) 状态...") + case .restartAgent: return AgentText("Restarting \(agent.displayName) service...", "正在重启 \(agent.displayName) 服务...") + case .resetConfig: return AgentText("Resetting \(agent.displayName) config...", "正在重置 \(agent.displayName) 配置...") + case .diagnostics: return AgentText("Exporting \(agent.displayName) diagnostics...", "正在导出 \(agent.displayName) 诊断包...") + } + } + + func revealPath(from result: AgentToolResult) -> String? 
{ + let raw = result.output.trimmingCharacters(in: .whitespacesAndNewlines) + switch self { + case .snapshotBackup, .restoreBackup, .diagnostics: + return raw.split(whereSeparator: { $0.isNewline }).map(String.init).last + case .migrateOpenClaw: + let prefix = AgentText("Migration report: ", "迁移报告:") + return raw + .split(whereSeparator: { $0.isNewline }) + .map { String($0).trimmingCharacters(in: .whitespaces) } + .first { $0.hasPrefix(prefix) } + .map { String($0.dropFirst(prefix.count)) } + default: + return nil + } + } +} + +private enum PendingAgentConfirmation: Identifiable { + case importProfile(URL) + case migrateOpenClaw(String) + case restoreBackup(AgentBackupPackage) + case resetConfig + + var id: String { + switch self { + case .importProfile(let url): return "import-\(url.path)" + case .migrateOpenClaw(let sourceVmId): return "migrate-openclaw-\(sourceVmId)" + case .restoreBackup(let package): return "restore-\(package.id)" + case .resetConfig: return "reset" + } + } + + var title: String { + switch self { + case .importProfile: return AgentText("Import Agent data?", "确认导入 Agent 数据?") + case .migrateOpenClaw: return AgentText("Auto-migrate from OpenClaw VM?", "确认从 OpenClaw VM 自动迁移?") + case .restoreBackup: return AgentText("Restore this backup?", "确认恢复这个备份?") + case .resetConfig: return AgentText("Reset configuration?", "确认重置配置?") + } + } + + var message: String { + switch self { + case .importProfile(let url): + return AgentText("Import will replace current Agent data. File: \(url.lastPathComponent)", "导入会替换当前 Agent 数据。文件:\(url.lastPathComponent)") + case .migrateOpenClaw: + return AgentText("Migration backs up target Hermes data, runs a dry-run preflight, then imports source OpenClaw user data, secrets, memory, skills, and compatible config.", "迁移会先备份目标 Hermes 数据,执行 dry-run 预检,再导入来源 OpenClaw 的用户数据、密钥、记忆、技能和兼容配置。") + case .restoreBackup(let package): + return AgentText("Restore will overwrite current Agent data with the selected backup. 
File: \(package.filename)", "恢复会用选中的备份覆盖当前 Agent 数据。文件:\(package.filename)") + case .resetConfig: + return AgentText("Reset will overwrite current Agent model configuration.", "重置会覆盖当前 Agent 模型配置。") + } + } + + var confirmTitle: String { + switch self { + case .importProfile: return AgentText("Import", "导入") + case .migrateOpenClaw: return AgentText("Migrate", "迁移") + case .restoreBackup: return AgentText("Restore", "恢复") + case .resetConfig: return AgentText("Reset", "重置") + } + } +} + +private struct AgentOperationDisplay: Identifiable { + let id = UUID() + let isSuccess: Bool + let title: String + let summary: String + let details: String + let revealPath: String? + let healthReport: HealthReport? +} + +private struct BackupPackageRow: View { + let package: AgentBackupPackage + let isSelected: Bool + let reveal: () -> Void + let select: () -> Void + + var body: some View { + HStack(spacing: 8) { + Button(action: select) { + HStack(spacing: 8) { + Image(systemName: isSelected ? "checkmark.circle.fill" : "circle") + .foregroundStyle(isSelected ? Color.accentColor : Color.secondary) + VStack(alignment: .leading, spacing: 2) { + Text(package.filename) + .fontWeight(.medium) + .lineLimit(1) + .truncationMode(.middle) + Text("\(Self.dateText(package.modifiedAt)) · \(Self.sizeText(package.sizeBytes))") + .font(.caption) + .foregroundStyle(.secondary) + } + Spacer() + } + .contentShape(Rectangle()) + } + .buttonStyle(.plain) + .frame(maxWidth: .infinity, alignment: .leading) + + Button(action: reveal) { + Image(systemName: "folder") + } + .buttonStyle(.borderless) + .help("在 Finder 中显示") + } + .padding(.horizontal, 8) + .padding(.vertical, 6) + .background(isSelected ? 
Color.accentColor.opacity(0.12) : Color.clear) + .clipShape(RoundedRectangle(cornerRadius: 6)) + } + + private static func dateText(_ date: Date) -> String { + let formatter = DateFormatter() + formatter.dateFormat = "yyyy-MM-dd HH:mm:ss" + return formatter.string(from: date) + } + + private static func sizeText(_ bytes: Int64) -> String { + ByteCountFormatter.string(fromByteCount: bytes, countStyle: .file) + } +} + +private struct MigrationProgressView: View { + let items: [AgentMigrationProgress] + private static let maxLineCharacters = 96 + private static let maxDetailCharacters = 96 + + var body: some View { + VStack(alignment: .leading, spacing: 8) { + Text(AgentText("Migration Progress", "迁移进度")) + .font(.headline) + + if items.isEmpty { + HStack(spacing: 8) { + ProgressView() + .controlSize(.small) + Text(AgentText("Preparing migration...", "准备迁移...")) + .foregroundStyle(.secondary) + } + } else { + VStack(alignment: .leading, spacing: 8) { + ForEach(items) { item in + HStack(alignment: .top, spacing: 8) { + Image(systemName: item.step == .complete ? "checkmark.circle.fill" : "circle.fill") + .font(.system(size: 8)) + .foregroundStyle(item.step == .complete ? 
Color.green : Color.accentColor) + .padding(.top, 5) + VStack(alignment: .leading, spacing: 2) { + Text(Self.compact("\(item.step.title):\(item.message)", limit: Self.maxLineCharacters)) + .font(.caption) + .lineLimit(1) + .truncationMode(.middle) + if let detail = item.detail, !detail.isEmpty { + Text(Self.compact(detail, limit: Self.maxDetailCharacters)) + .font(.caption2) + .foregroundStyle(.secondary) + .lineLimit(1) + .truncationMode(.middle) + } + } + Spacer() + } + } + } + } + } + .padding(12) + .background(Color.accentColor.opacity(0.08)) + .clipShape(RoundedRectangle(cornerRadius: 8)) + } + + private static func compact(_ text: String, limit: Int) -> String { + guard text.count > limit else { return text } + let headCount = max(1, limit * 2 / 3) + let tailCount = max(1, limit - headCount) + return "\(text.prefix(headCount)) ... \(text.suffix(tailCount))" + } +} + +private struct AgentOperationResultView: View { + let result: AgentOperationDisplay + @State private var showsDetails = false + private static let maxRenderedDetailCharacters = 12_000 + + var body: some View { + VStack(alignment: .leading, spacing: 10) { + HStack(spacing: 8) { + Image(systemName: result.isSuccess ? "checkmark.circle.fill" : "xmark.octagon.fill") + .foregroundStyle(result.isSuccess ? 
.green : .red) + VStack(alignment: .leading, spacing: 2) { + Text(result.title) + .fontWeight(.semibold) + Text(result.summary) + .font(.caption) + .foregroundStyle(.secondary) + .lineLimit(2) + } + Spacer() + } + + if let report = result.healthReport, report.state != "ok" { + HealthReportView(report: report) + } + + HStack(spacing: 12) { + if let path = result.revealPath, !path.isEmpty { + Button { + NSWorkspace.shared.activateFileViewerSelecting([URL(fileURLWithPath: path)]) + } label: { + Label(AgentText("Show in Finder", "在 Finder 中显示"), systemImage: "folder") + } + .buttonStyle(.link) + } + + if !result.details.isEmpty { + Button { + NSPasteboard.general.clearContents() + NSPasteboard.general.setString(result.details, forType: .string) + } label: { + Label(AgentText("Copy Details", "复制详情"), systemImage: "doc.on.doc") + } + .buttonStyle(.link) + } + + Spacer() + } + + if !result.details.isEmpty { + DisclosureGroup(isExpanded: $showsDetails) { + if showsDetails { + ScrollView { + Text(Self.renderedDetails(result.details)) + .font(.system(.caption, design: .monospaced)) + .textSelection(.enabled) + .frame(maxWidth: .infinity, alignment: .leading) + .padding(.top, 4) + } + .frame(maxHeight: 140) + } + } label: { + Text(result.details.count > Self.maxRenderedDetailCharacters ? AgentText("Details (truncated; copy for full content)", "详情(已截断显示,可复制完整内容)") : AgentText("Details", "详情")) + .font(.caption) + } + } + } + .padding(12) + .background(result.isSuccess ? Color.green.opacity(0.08) : Color.red.opacity(0.08)) + .clipShape(RoundedRectangle(cornerRadius: 8)) + } + + private static func renderedDetails(_ details: String) -> String { + guard details.count > maxRenderedDetailCharacters else { return details } + let headCount = maxRenderedDetailCharacters / 2 + let tailCount = maxRenderedDetailCharacters - headCount + return """ + \(String(details.prefix(headCount))) + + \(AgentText("... 
details are long, so the view shows the beginning and end; copy for full content, and check the migration report for full logs ...", "... 详情过长,界面只显示前后片段;完整内容可复制,迁移完整日志请查看报告文件 ...")) + + \(String(details.suffix(tailCount))) + """ + } +} + +private struct StatusPill: View { + enum Tone { + case ok + case warning + case muted + } + + let title: String + let value: String + let systemImage: String + let tone: Tone + + private var color: Color { + switch tone { + case .ok: return .green + case .warning: return .orange + case .muted: return .secondary + } + } + + var body: some View { + HStack(spacing: 6) { + Image(systemName: systemImage) + .foregroundStyle(color) + VStack(alignment: .leading, spacing: 1) { + Text(title) + .font(.caption2) + .foregroundStyle(.secondary) + Text(value) + .font(.caption) + .fontWeight(.medium) + .lineLimit(1) + .truncationMode(.middle) + } + } + .padding(.horizontal, 8) + .padding(.vertical, 6) + .frame(maxWidth: .infinity, alignment: .leading) + .background(.background.opacity(0.6)) + .clipShape(RoundedRectangle(cornerRadius: 6)) + } +} + +private struct HealthReport { + let state: String + let message: String + let checks: [HealthCheckItem] + + func value(_ key: String) -> String { + checks.first { $0.key == key }?.value ?? "unknown" + } + + func isError(_ key: String) -> Bool { + let state = value(key) + return state == "error" || state == "space_low" + } + + static func parse(from raw: String) -> HealthReport? { + let jsonLine = raw + .split(whereSeparator: { $0.isNewline }) + .map { String($0).trimmingCharacters(in: .whitespacesAndNewlines) } + .first { $0.hasPrefix("{") && $0.hasSuffix("}") } + guard let jsonLine, + let data = jsonLine.data(using: .utf8), + let object = try? JSONSerialization.jsonObject(with: data) as? [String: Any] else { + return nil + } + let checks = object["checks"] as? [String: Any] ?? [:] + return HealthReport( + state: object["state"] as? String ?? "unknown", + message: translateMessage(object["message"] as? 
String ?? ""), + checks: [ + HealthCheckItem(key: "agent_service", title: AgentText("Agent service", "Agent 服务"), value: checks["agent_service"] as? String ?? "unknown"), + HealthCheckItem(key: "gateway_port", title: AgentText("Gateway port", "网关端口"), value: checks["gateway_port"] as? String ?? "unknown"), + HealthCheckItem(key: "llm_proxy", title: AgentText("Model proxy", "模型代理"), value: checks["llm_proxy"] as? String ?? "unknown"), + HealthCheckItem(key: "browser", title: AgentText("Browser", "浏览器"), value: checks["browser"] as? String ?? "unknown"), + HealthCheckItem(key: "disk", title: AgentText("Disk space", "磁盘空间"), value: checks["disk"] as? String ?? "unknown") + ] + ) + } + + private static func translateMessage(_ message: String) -> String { + switch message { + case "Agent normal": return AgentText("Agent normal", "Agent 正常") + case "Disk space is low": return AgentText("Disk space is low", "磁盘空间不足") + case "Agent service is not running": return AgentText("Agent service is not running", "Agent 服务未运行") + case "Agent gateway is unavailable": return AgentText("Agent gateway is unavailable", "Agent 网关不可用") + case "Model proxy is unavailable": return AgentText("Model proxy unavailable", "模型代理不可用") + case "Browser is unavailable": return AgentText("Browser is unavailable", "浏览器不可用") + case "Model proxy is available": return AgentText("Model proxy available", "模型代理可用") + default: return message.isEmpty ? 
AgentText("Status unknown", "状态未知") : message + } + } +} + +private struct HealthCheckItem: Identifiable { + let id = UUID() + let key: String + let title: String + let value: String + + var displayValue: String { + switch value { + case "ok": return AgentText("OK", "正常") + case "error": return AgentText("Error", "异常") + case "skipped": return AgentText("Skipped", "跳过") + case "space_low": return AgentText("Low space", "空间不足") + default: return AgentText("Unknown", "未知") + } + } + + var color: Color { + switch value { + case "ok", "skipped": return .green + case "space_low": return .orange + case "error": return .red + default: return .secondary + } + } + + var icon: String { + switch value { + case "ok", "skipped": return "checkmark.circle.fill" + case "space_low": return "exclamationmark.triangle.fill" + case "error": return "xmark.octagon.fill" + default: return "questionmark.circle" + } + } +} + +private struct HealthReportView: View { + let report: HealthReport + + var body: some View { + VStack(alignment: .leading, spacing: 8) { + Text(report.message) + .font(.caption) + .foregroundStyle(report.state == "ok" ? Color.secondary : Color.red) + + LazyVGrid(columns: [ + GridItem(.flexible(), spacing: 8), + GridItem(.flexible(), spacing: 8) + ], spacing: 8) { + ForEach(report.checks) { item in + HStack(spacing: 6) { + Image(systemName: item.icon) + .foregroundStyle(item.color) + Text(item.title) + Spacer() + Text(item.displayValue) + .foregroundStyle(.secondary) + } + .font(.caption) + .padding(.horizontal, 8) + .padding(.vertical, 6) + .background(.background.opacity(0.65)) + .clipShape(RoundedRectangle(cornerRadius: 6)) + } + } + } + } +} diff --git a/src/manager-macos/Views/ContentView.swift b/src/manager-macos/Views/ContentView.swift index 5c142ad..960b354 100644 --- a/src/manager-macos/Views/ContentView.swift +++ b/src/manager-macos/Views/ContentView.swift @@ -85,6 +85,12 @@ struct ContentView: View { Divider() + Button(action: { appState.showAgentToolsSheet = true }) { + Label("Agent Toolbox", systemImage: "cross.case") + } + .disabled(vm.state != .running) + .help("Open the Agent toolbox") + 
Button(action: { appState.showSharedFoldersSheet = true }) { ToolbarBadgeLabel( title: "Shared Folders", @@ -144,6 +150,11 @@ struct ContentView: View { .sheet(isPresented: $appState.showLlmProxySheet) { LlmProxySheet() } + .sheet(isPresented: $appState.showAgentToolsSheet) { + if let vm = selectedVm { + AgentToolsSheet(vmId: vm.id, session: appState.getOrCreateSession(for: vm.id)) + } + } .alert("Delete VM", isPresented: $appState.showDeleteConfirm) { Button("Cancel", role: .cancel) {} Button("Delete", role: .destructive) { diff --git a/src/manager-macos/Views/VmDetailView.swift b/src/manager-macos/Views/VmDetailView.swift index 9a32088..979e22d 100644 --- a/src/manager-macos/Views/VmDetailView.swift +++ b/src/manager-macos/Views/VmDetailView.swift @@ -26,6 +26,22 @@ class VmSession: ObservableObject { private weak var clipboardHandler: ClipboardHandler? private var connecting = false private static let maxConsoleSize = 64 * 1024 + private var pendingConsoleCommands: [String: PendingConsoleCommand] = [:] + private var nextGuestExecRequestId: UInt64 = 1 + private var pendingGuestExecCommands: [UInt64: PendingGuestExecCommand] = [:] + + private struct PendingConsoleCommand { + let beginMarker: String + let endPrefix: String + let completion: (Result) -> Void + let beginTimeoutWorkItem: DispatchWorkItem + let timeoutWorkItem: DispatchWorkItem + } + + private struct PendingGuestExecCommand { + let completion: (Result) -> Void + let timeoutWorkItem: DispatchWorkItem + } init(vmId: String, clipboardHandler: ClipboardHandler) { self.vmId = vmId @@ -47,6 +63,16 @@ class VmSession: ObservableObject { ipcClient.onGuestAgentState = { [weak self] conn in self?.guestAgentConnected = conn } + ipcClient.onGuestExecResult = { [weak self] requestId, ok, exitCode, stdoutText, stderrText, error in + self?.finishGuestExecCommand( + requestId: requestId, + ok: ok, + exitCode: exitCode, + stdoutText: stdoutText, + stderrText: stderrText, + error: error + ) + } ipcClient.onFrame = { 
[weak self] pixelBytes, pixelLength, w, h, stride, resW, resH, dirtyX, dirtyY in guard let self = self, let renderer = self.renderer else { return } @@ -100,6 +126,7 @@ class VmSession: ObservableObject { self.connected = false self.connecting = false self.displayInitialized = false + self.failPendingGuestExecCommands(ConsoleCommandError("VM runtime disconnected")) } setupClipboardCallbacks() @@ -204,6 +231,7 @@ class VmSession: ObservableObject { func disconnect() { audioPlayer.stop() + failPendingGuestExecCommands(ConsoleCommandError("VM runtime disconnected")) ipcClient.disconnect() connected = false connecting = false @@ -213,12 +241,158 @@ class VmSession: ObservableObject { ipcClient.sendConsoleInput(text) } + func runGuestAgentCommand(_ command: String, timeout: TimeInterval = 120, + completion: @escaping (Result) -> Void) { + DispatchQueue.main.async { + guard self.connected, self.ipcClient.isConnected else { + completion(.failure(ConsoleCommandError("VM runtime is not connected"))) + return + } + guard self.guestAgentConnected else { + completion(.failure(ConsoleCommandError("Guest agent is not connected"))) + return + } + + let requestId = self.nextGuestExecRequestId + self.nextGuestExecRequestId += 1 + let timeoutMs = UInt32(min(max(timeout * 1000, 1000), 600000)) + let timeoutWorkItem = DispatchWorkItem { [weak self] in + guard let self = self else { return } + if let pending = self.pendingGuestExecCommands.removeValue(forKey: requestId) { + pending.completion(.failure(ConsoleCommandError("Command timed out"))) + } + } + + self.pendingGuestExecCommands[requestId] = PendingGuestExecCommand( + completion: completion, + timeoutWorkItem: timeoutWorkItem + ) + DispatchQueue.main.asyncAfter(deadline: .now() + timeout, execute: timeoutWorkItem) + + self.ipcClient.sendGuestExecAsync(command: command, user: "tenbox", requestId: requestId, timeoutMs: timeoutMs) { [weak self] sent in + guard let self = self, !sent else { return } + guard let pending = 
self.pendingGuestExecCommands.removeValue(forKey: requestId) else { return } + pending.timeoutWorkItem.cancel() + pending.completion(.failure(ConsoleCommandError("Failed to send guest agent command"))) + } + } + } + + func runShellCommand(_ command: String, timeout: TimeInterval = 120, + completion: @escaping (Result) -> Void) { + DispatchQueue.main.async { + guard self.connected, self.ipcClient.isConnected else { + completion(.failure(ConsoleCommandError("VM console is not connected"))) + return + } + + let token = UUID().uuidString.replacingOccurrences(of: "-", with: "") + let beginMarker = "__TENBOX_CMD_BEGIN_\(token)__" + let endPrefix = "__TENBOX_CMD_END_\(token)__:" + let quotedCommand = Self.shellQuote(command) + let beginTimeoutWorkItem = DispatchWorkItem { [weak self] in + guard let self = self else { return } + guard let pending = self.pendingConsoleCommands[token] else { return } + if self.consoleText.range(of: pending.beginMarker, options: .backwards) == nil { + pending.timeoutWorkItem.cancel() + self.pendingConsoleCommands.removeValue(forKey: token) + pending.completion(.failure(ConsoleCommandError("VM shell did not start the command"))) + } + } + let timeoutWorkItem = DispatchWorkItem { [weak self] in + guard let self = self else { return } + if let pending = self.pendingConsoleCommands.removeValue(forKey: token) { + pending.beginTimeoutWorkItem.cancel() + pending.completion(.failure(ConsoleCommandError("Command timed out"))) + } + } + + self.pendingConsoleCommands[token] = PendingConsoleCommand( + beginMarker: beginMarker, + endPrefix: endPrefix, + completion: completion, + beginTimeoutWorkItem: beginTimeoutWorkItem, + timeoutWorkItem: timeoutWorkItem + ) + + DispatchQueue.main.asyncAfter(deadline: .now() + 12, execute: beginTimeoutWorkItem) + DispatchQueue.main.asyncAfter(deadline: .now() + timeout, execute: timeoutWorkItem) + let quotedToken = Self.shellQuote(token) + let wrapped = "stty -echo 2>/dev/null; __tenbox_token=\(quotedToken); 
__tenbox_begin=\"__TENBOX_CMD_BEGIN_${__tenbox_token}__\"; __tenbox_end=\"__TENBOX_CMD_END_${__tenbox_token}__:\"; printf '\\n%s\\n' \"$__tenbox_begin\"; /bin/sh -lc \(quotedCommand); rc=$?; printf '\\n%s%s\\n' \"$__tenbox_end\" \"$rc\"; stty echo 2>/dev/null\n" + self.sendConsoleInput(wrapped) + } + } + + private func finishGuestExecCommand(requestId: UInt64, ok: Bool, exitCode: Int32, + stdoutText: String, stderrText: String, + error: String?) { + guard let pending = pendingGuestExecCommands.removeValue(forKey: requestId) else { + return + } + pending.timeoutWorkItem.cancel() + + let output: String + if !stdoutText.isEmpty && !stderrText.isEmpty { + output = stdoutText + "\n" + stderrText + } else { + output = stdoutText + stderrText + } + + if ok { + pending.completion(.success(ConsoleCommandResult(exitCode: exitCode, output: output))) + } else { + let message = error ?? (output.isEmpty ? "Guest agent command failed" : output) + pending.completion(.failure(ConsoleCommandError(message))) + } + } + + private func failPendingGuestExecCommands(_ error: Error) { + let pending = pendingGuestExecCommands + pendingGuestExecCommands.removeAll() + for (_, command) in pending { + command.timeoutWorkItem.cancel() + command.completion(.failure(error)) + } + } + private func appendConsoleText(_ text: String) { consoleText.append(text) if consoleText.count > Self.maxConsoleSize { let excess = consoleText.count - Self.maxConsoleSize * 3 / 4 consoleText.removeFirst(excess) } + checkPendingConsoleCommands() + } + + private func checkPendingConsoleCommands() { + for token in Array(pendingConsoleCommands.keys) { + guard let pending = pendingConsoleCommands[token], + let endRange = consoleText.range(of: pending.endPrefix, options: .backwards) else { + continue + } + let afterEnd = consoleText[endRange.upperBound...] + guard let lineEnd = afterEnd.firstIndex(where: { $0 == "\n" }) else { continue } + let exitText = afterEnd[.. 
String { + "'" + value.replacingOccurrences(of: "'", with: "'\\''") + "'" } static func filterAnsi(_ input: String) -> String { diff --git a/src/manager/CMakeLists.txt b/src/manager/CMakeLists.txt index a9e3b64..b423c5b 100644 --- a/src/manager/CMakeLists.txt +++ b/src/manager/CMakeLists.txt @@ -2,6 +2,7 @@ add_executable(tenbox-manager WIN32 ${CMAKE_SOURCE_DIR}/src/manager/main.cpp ${CMAKE_SOURCE_DIR}/src/manager/manager_service.cpp ${CMAKE_SOURCE_DIR}/src/manager/app_settings.cpp + ${CMAKE_SOURCE_DIR}/src/manager/agent_tools_service.cpp ${CMAKE_SOURCE_DIR}/src/common/image_source.cpp ${CMAKE_SOURCE_DIR}/src/manager/http_download.cpp ${CMAKE_SOURCE_DIR}/src/manager/app.manifest @@ -13,6 +14,7 @@ add_executable(tenbox-manager WIN32 ${CMAKE_SOURCE_DIR}/src/manager/ui/shared_folders_dialog.cpp ${CMAKE_SOURCE_DIR}/src/manager/ui/port_forward_dialog.cpp ${CMAKE_SOURCE_DIR}/src/manager/ui/llm_proxy_dialog.cpp + ${CMAKE_SOURCE_DIR}/src/manager/ui/agent_tools_dialog.cpp ${CMAKE_SOURCE_DIR}/src/manager/ui/settings_dialog.cpp ${CMAKE_SOURCE_DIR}/src/manager/ui/win32_display_panel.cpp ${CMAKE_SOURCE_DIR}/src/manager/ui/info_tab.cpp @@ -43,6 +45,7 @@ target_link_libraries(tenbox-manager ole32 winhttp bcrypt + crypt32 ws2_32 ) @@ -53,6 +56,14 @@ add_custom_command(TARGET tenbox-manager POST_BUILD "$" ) +add_custom_command(TARGET tenbox-manager POST_BUILD + COMMAND ${CMAKE_COMMAND} -E make_directory + "$/AgentTools" + COMMAND ${CMAKE_COMMAND} -E copy_if_different + "${CMAKE_SOURCE_DIR}/src/agent_tools/guest/agent_tools.sh" + "$/AgentTools/agent_tools.sh" +) + if(MSVC) # Mitigate Defender heuristic false positives (Wacatac.H!ml): # - /GL whole-program optimization produces cleaner code layout diff --git a/src/manager/agent_tools_service.cpp b/src/manager/agent_tools_service.cpp new file mode 100644 index 0000000..b8f88ad --- /dev/null +++ b/src/manager/agent_tools_service.cpp @@ -0,0 +1,693 @@ +#include "manager/agent_tools_service.h" + +#include "manager/app_settings.h" 
+#include "manager/i18n.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef _WIN32 +#include +#endif + +namespace agent_tools { +namespace fs = std::filesystem; + +namespace { + +ToolResult Failure(std::string message, std::string output = {}) { + ToolResult r; + r.ok = false; + r.message = std::move(message); + r.output = std::move(output); + return r; +} + +ToolResult Success(std::string message, std::string output = {}) { + ToolResult r; + r.ok = true; + r.message = std::move(message); + r.output = std::move(output); + return r; +} + +std::string Text(const char* en, const char* zh) { + return i18n::GetCurrentLanguage() == i18n::Lang::kChineseSimplified ? zh : en; +} + +std::string NormalizeTagSeed(std::string value) { + value.erase(std::remove(value.begin(), value.end(), '-'), value.end()); + if (value.size() > 8) value.resize(8); + std::transform(value.begin(), value.end(), value.begin(), + [](unsigned char c) { return static_cast(std::tolower(c)); }); + return value.empty() ? "00000000" : value; +} + +} // namespace + +const char* AgentRawValue(AgentKind agent) { + return agent == AgentKind::kHermes ? "hermes" : "openclaw"; +} + +const char* AgentDisplayName(AgentKind agent) { + return agent == AgentKind::kHermes ? 
"Hermes" : "OpenClaw"; +} + +std::string SkillConflictRawValue(SkillConflictStrategy strategy) { + switch (strategy) { + case SkillConflictStrategy::kOverwrite: return "overwrite"; + case SkillConflictStrategy::kRename: return "rename"; + case SkillConflictStrategy::kSkip: + default: return "skip"; + } +} + +std::string SkillConflictDisplayName(SkillConflictStrategy strategy) { + switch (strategy) { + case SkillConflictStrategy::kOverwrite: return Text("Overwrite Hermes skills", "技能覆盖 Hermes"); + case SkillConflictStrategy::kRename: return Text("Rename imported skills", "技能重命名导入"); + case SkillConflictStrategy::kSkip: + default: return Text("Keep Hermes skills", "技能保留 Hermes"); + } +} + +AgentToolsService::AgentToolsService(ManagerService& manager, std::string data_dir) + : manager_(manager), data_dir_(std::move(data_dir)) {} + +std::string AgentToolsService::Timestamp() { + auto now = std::chrono::system_clock::now(); + std::time_t t = std::chrono::system_clock::to_time_t(now); + std::tm tm{}; +#ifdef _WIN32 + localtime_s(&tm, &t); +#else + localtime_r(&t, &tm); +#endif + std::ostringstream os; + os << std::put_time(&tm, "%Y-%m-%d-%H%M%S"); + return os.str(); +} + +std::string AgentToolsService::PathFilename(const std::string& path) { + auto u8 = fs::path(path).filename().u8string(); + return std::string(reinterpret_cast(u8.data()), u8.size()); +} + +std::string AgentToolsService::Dirname(const std::string& path) { + auto u8 = fs::path(path).parent_path().u8string(); + return std::string(reinterpret_cast(u8.data()), u8.size()); +} + +std::string AgentToolsService::OperationBaseDirectory() const { + return (fs::path(data_dir_) / "AgentOperations").string(); +} + +std::string AgentToolsService::BackupBaseDirectory(const std::string& vm_id) const { + return (fs::path(data_dir_) / "AgentBackups" / vm_id).string(); +} + +std::string AgentToolsService::BackupPackageDirectory(const std::string& vm_id, AgentKind agent) const { + return 
(fs::path(BackupBaseDirectory(vm_id)) / AgentRawValue(agent)).string(); +} + +std::string AgentToolsService::NewBackupPackagePath(const std::string& vm_id, AgentKind agent) const { + return (fs::path(BackupPackageDirectory(vm_id, agent)) / + ("agent-data-" + Timestamp() + ".tar.gz")).string(); +} + +std::string AgentToolsService::NewMigrationReportPath(const std::string& vm_id) const { + return (fs::path(BackupPackageDirectory(vm_id, AgentKind::kHermes)) / + ("openclaw-migration-" + Timestamp() + ".txt")).string(); +} + +bool AgentToolsService::IsRunnable(const std::string& vm_id, std::string* error) const { + auto vm = manager_.GetVm(vm_id); + if (!vm) { + if (error) *error = Text("VM was not found", "找不到 VM"); + return false; + } + if (vm->state != VmPowerState::kRunning) { + if (error) *error = Text("VM is not running", "VM 未运行"); + return false; + } + if (!vm->guest_agent_connected) { + if (error) *error = Text("Guest Agent is not connected", "Guest Agent 未连接"); + return false; + } + return true; +} + +void AgentToolsService::WithOperationShare(const std::vector& vm_ids, + ShareCallback cb, + ToolCallback failure_cb) { + std::error_code ec; + fs::create_directories(OperationBaseDirectory(), ec); + if (ec) { + failure_cb(Failure(Text("Failed to create temporary directory", "创建临时目录失败"), ec.message())); + return; + } + + const std::string tag = "tenbox-agent-ops-" + NormalizeTagSeed(settings::GenerateUuid()); + fs::path dir = fs::path(OperationBaseDirectory()) / (tag + "-" + NormalizeTagSeed(settings::GenerateUuid())); + fs::create_directories(dir, ec); + if (ec) { + failure_cb(Failure(Text("Failed to create temporary shared directory", "创建临时共享目录失败"), ec.message())); + return; + } + + ShareLease lease; + lease.folder = SharedFolder{tag, dir.string(), false}; + lease.vm_ids = vm_ids; + lease.cleanup_dir = dir.string(); + + for (const auto& vm_id : vm_ids) { + std::string error; + if (!manager_.AddRuntimeSharedFolder(vm_id, lease.folder, &error)) { + 
CleanupShare(lease); + failure_cb(Failure(Text("Failed to mount temporary shared directory", "挂载临时共享目录失败"), error)); + return; + } + } + cb(std::move(lease)); +} + +void AgentToolsService::WithBackupShare(const std::string& vm_id, + ShareCallback cb, + ToolCallback failure_cb) { + std::error_code ec; + fs::create_directories(BackupBaseDirectory(vm_id), ec); + if (ec) { + failure_cb(Failure(Text("Failed to create backup directory", "创建备份目录失败"), ec.message())); + return; + } + const std::string tag = "tenbox-agent-backups-" + NormalizeTagSeed(settings::GenerateUuid()); + + ShareLease lease; + lease.folder = SharedFolder{tag, BackupBaseDirectory(vm_id), false}; + lease.vm_ids = {vm_id}; + + std::string error; + if (!manager_.AddRuntimeSharedFolder(vm_id, lease.folder, &error)) { + failure_cb(Failure(Text("Failed to mount backup directory", "挂载备份目录失败"), error)); + return; + } + cb(std::move(lease)); +} + +void AgentToolsService::CleanupShare(const ShareLease& lease) { + for (const auto& vm_id : lease.vm_ids) { + std::string ignored; + manager_.RemoveRuntimeSharedFolder(vm_id, lease.folder.tag, &ignored); + } + if (!lease.cleanup_dir.empty()) { + std::error_code ec; + fs::remove_all(lease.cleanup_dir, ec); + } +} + +void AgentToolsService::RunCommand(const std::string& vm_id, const std::string& command, + uint32_t timeout_ms, ToolCallback cb) { + std::string error; + if (!IsRunnable(vm_id, &error)) { + cb(Failure(error)); + return; + } + manager_.RunGuestAgentCommand(vm_id, command, timeout_ms, + [cb = std::move(cb)](ManagerService::GuestExecResult result) mutable { + const std::string output = result.CombinedOutput(); + if (!result.ok) { + std::string message = result.error.empty() ? 
Text("Guest Agent command failed", "Guest Agent 命令执行失败") : result.error; + if (message.find("/bin/sh") != std::string::npos || message.find("No such file") != std::string::npos) { + message = Text("Agent tools require a Linux guest OS.", "Agent 工具箱需要 Linux Guest OS。"); + } + cb(Failure(message, output)); + return; + } + if (result.exit_code != 0) { + std::string message = output.empty() ? Text("Agent operation failed", "Agent 操作失败") : output; + if (message.find("Agent tools require a Linux guest OS") != std::string::npos) { + message = Text("Agent tools require a Linux guest OS.", "Agent 工具箱需要 Linux Guest OS。"); + } + cb(Failure(message, output)); + return; + } + cb(Success("ok", output)); + }); +} + +std::vector AgentToolsService::ListBackups(const std::string& vm_id, AgentKind agent) const { + std::vector result; + const fs::path dir = BackupPackageDirectory(vm_id, agent); + std::error_code ec; + if (!fs::exists(dir, ec)) return result; + for (const auto& entry : fs::directory_iterator(dir, ec)) { + if (ec || !entry.is_regular_file()) continue; + const auto path = entry.path(); + const auto name_u8 = path.filename().u8string(); + const std::string name(reinterpret_cast(name_u8.data()), name_u8.size()); + if (name.rfind("agent-data-", 0) != 0 || path.extension() != ".gz") continue; + BackupPackage pkg; + pkg.path = path.string(); + pkg.filename = name; + pkg.size = static_cast(entry.file_size(ec)); + auto ft = entry.last_write_time(ec); + if (!ec) { + auto sctp = std::chrono::time_point_cast( + ft - fs::file_time_type::clock::now() + std::chrono::system_clock::now()); + pkg.modified_at = sctp; + } + result.push_back(std::move(pkg)); + } + std::sort(result.begin(), result.end(), + [](const BackupPackage& a, const BackupPackage& b) { + return a.modified_at > b.modified_at; + }); + return result; +} + +void AgentToolsService::RotateBackups(const std::string& vm_id, AgentKind agent, int keep_count) { + auto packages = ListBackups(vm_id, agent); + if (keep_count < 1) 
keep_count = 1; + for (size_t i = static_cast(keep_count); i < packages.size(); ++i) { + std::error_code ec; + fs::remove(packages[i].path, ec); + } +} + +std::string AgentToolsService::ShellQuote(const std::string& value) { + std::string out = "'"; + for (char ch : value) { + if (ch == '\'') out += "'\\''"; + else out.push_back(ch); + } + out += "'"; + return out; +} + +std::string AgentToolsService::WithSharedFolderReady(const std::string& tag, const std::string& body) { + const std::string path = "/mnt/shared/" + tag; + return + "set -eu\n" + "share_dir=" + ShellQuote(path) + "\n" + "i=0\n" + "while [ \"$i\" -lt 100 ]; do\n" + " if [ -d \"$share_dir\" ] && [ -w \"$share_dir\" ]; then break; fi\n" + " i=$((i + 1)); sleep 0.2\n" + "done\n" + "[ -d \"$share_dir\" ] || { echo \"Shared folder is not mounted: $share_dir\" >&2; exit 1; }\n" + "[ -w \"$share_dir\" ] || { echo \"Shared folder is not writable: $share_dir\" >&2; exit 1; }\n" + + body + "\n"; +} + +bool AgentToolsService::PrepareAgentToolScript(const ShareLease& lease, std::string* error) const { + std::vector candidates; +#ifdef _WIN32 + wchar_t module_path[MAX_PATH]; + DWORD len = GetModuleFileNameW(nullptr, module_path, MAX_PATH); + if (len > 0 && len < MAX_PATH) { + fs::path module_dir = fs::path(module_path).parent_path(); + candidates.push_back(module_dir / "AgentTools" / "agent_tools.sh"); + candidates.push_back(module_dir.parent_path().parent_path() / "src" / "agent_tools" / "guest" / "agent_tools.sh"); + } +#endif + candidates.push_back(fs::current_path() / "src" / "agent_tools" / "guest" / "agent_tools.sh"); + + fs::path source; + std::error_code ec; + for (const auto& candidate : candidates) { + if (fs::exists(candidate, ec) && fs::is_regular_file(candidate, ec)) { + source = candidate; + break; + } + } + if (source.empty()) { + if (error) *error = "Agent tools script was not found."; + return false; + } + + fs::path destination = fs::path(lease.folder.host_path) / "agent_tools.sh"; + 
fs::copy_file(source, destination, fs::copy_options::overwrite_existing, ec); + if (ec) { + if (error) *error = "Failed to copy Agent tools script: " + ec.message(); + return false; + } + return true; +} + +std::string AgentToolsService::ScriptInvocation(const std::string& tag, const std::vector& args) { + const std::string script = "/mnt/shared/" + tag + "/agent_tools.sh"; + std::ostringstream os; + os << "script=" << ShellQuote(script) << "\n" + << "[ -f \"$script\" ] || { echo \"Agent tools script is missing: $script\" >&2; exit 1; }\n" + << "chmod +x \"$script\" 2>/dev/null || true\n" + << "/bin/sh \"$script\""; + for (const auto& arg : args) { + os << " " << ShellQuote(arg); + } + os << "\n"; + return os.str(); +} + +std::string AgentToolsService::ScriptCommand(const std::string& tag, const std::vector& args) { + return WithSharedFolderReady(tag, ScriptInvocation(tag, args)); +} + +void AgentToolsService::ExportProfile(const std::string& vm_id, AgentKind agent, + const std::string& destination_path, ToolCallback cb) { + ToolCallback failure_cb = cb; + WithOperationShare({vm_id}, [this, vm_id, agent, destination_path, cb = std::move(cb)](ShareLease lease) mutable { + std::string script_error; + if (!PrepareAgentToolScript(lease, &script_error)) { + CleanupShare(lease); + cb(Failure(script_error)); + return; + } + const std::string package_name = PathFilename(destination_path).empty() + ? 
std::string(AgentRawValue(agent)) + "-profile.tar.gz" + : PathFilename(destination_path); + const std::string guest_package = "/mnt/shared/" + lease.folder.tag + "/" + package_name; + const std::string command = ScriptCommand(lease.folder.tag, + {"export-profile", AgentRawValue(agent), guest_package, "migration"}); + RunCommand(vm_id, command, 420000, [this, lease, destination_path, guest_package, cb = std::move(cb)](ToolResult result) mutable { + CleanupShare(lease); + if (!result.ok) { + cb(result); + return; + } + std::error_code ec; + fs::copy_file(fs::path(lease.folder.host_path) / PathFilename(guest_package), + destination_path, fs::copy_options::overwrite_existing, ec); + if (ec) cb(Failure(Text("Failed to copy exported package", "复制导出包失败"), ec.message())); + else cb(Success(Text("Agent data exported", "已导出 Agent 数据"), destination_path)); + }); + }, std::move(failure_cb)); +} + +void AgentToolsService::ImportProfile(const std::string& vm_id, AgentKind agent, + const std::string& source_path, ToolCallback cb) { + ToolCallback failure_cb = cb; + WithOperationShare({vm_id}, [this, vm_id, agent, source_path, cb = std::move(cb)](ShareLease lease) mutable { + std::string script_error; + if (!PrepareAgentToolScript(lease, &script_error)) { + CleanupShare(lease); + cb(Failure(script_error)); + return; + } + const std::string package_name = "tenbox-agent-profile-import.tar.gz"; + std::error_code ec; + fs::copy_file(source_path, fs::path(lease.folder.host_path) / package_name, + fs::copy_options::overwrite_existing, ec); + if (ec) { + CleanupShare(lease); + cb(Failure(Text("Failed to copy import package", "复制导入包失败"), ec.message())); + return; + } + const std::string guest_package = "/mnt/shared/" + lease.folder.tag + "/" + package_name; + const std::string command = ScriptCommand(lease.folder.tag, + {"import-profile", AgentRawValue(agent), guest_package}); + RunCommand(vm_id, command, 420000, [this, lease, agent, cb = std::move(cb)](ToolResult result) mutable { + 
CleanupShare(lease); + if (!result.ok) cb(result); + else cb(Success(Text("Agent data imported", "已导入 Agent 数据"), result.output)); + }); + }, std::move(failure_cb)); +} + +void AgentToolsService::SnapshotBackup(const std::string& vm_id, AgentKind agent, + int keep_count, ToolCallback cb) { + std::error_code ec; + fs::create_directories(BackupPackageDirectory(vm_id, agent), ec); + if (ec) { + cb(Failure(Text("Failed to create backup directory", "创建备份目录失败"), ec.message())); + return; + } + const std::string package = NewBackupPackagePath(vm_id, agent); + ToolCallback failure_cb = cb; + WithBackupShare(vm_id, [this, vm_id, agent, keep_count, package, cb = std::move(cb)](ShareLease lease) mutable { + std::string script_error; + if (!PrepareAgentToolScript(lease, &script_error)) { + CleanupShare(lease); + cb(Failure(script_error)); + return; + } + const std::string guest_dir = "/mnt/shared/" + lease.folder.tag + "/" + AgentRawValue(agent); + const std::string guest_package = guest_dir + "/" + PathFilename(package); + const std::string command = WithSharedFolderReady( + lease.folder.tag, + "mkdir -p " + ShellQuote(guest_dir) + "\n" + + ScriptInvocation(lease.folder.tag, {"export-profile", AgentRawValue(agent), guest_package, "backup"})); + RunCommand(vm_id, command, 420000, [this, lease, vm_id, agent, keep_count, package, cb = std::move(cb)](ToolResult result) mutable { + CleanupShare(lease); + if (!result.ok) { + cb(result); + return; + } + RotateBackups(vm_id, agent, keep_count); + cb(Success(Text("Agent data backup created", "已创建 Agent 数据备份"), package)); + }); + }, std::move(failure_cb)); +} + +void AgentToolsService::RestoreBackup(const std::string& vm_id, AgentKind agent, + const std::string& package_path, ToolCallback cb) { + ToolCallback failure_cb = cb; + WithBackupShare(vm_id, [this, vm_id, agent, package_path, cb = std::move(cb)](ShareLease lease) mutable { + std::string script_error; + if (!PrepareAgentToolScript(lease, &script_error)) { + CleanupShare(lease); 
+ cb(Failure(script_error)); + return; + } + const std::string guest_package = "/mnt/shared/" + lease.folder.tag + "/" + + std::string(AgentRawValue(agent)) + "/" + PathFilename(package_path); + const std::string command = ScriptCommand(lease.folder.tag, + {"import-profile", AgentRawValue(agent), guest_package}); + RunCommand(vm_id, command, 420000, [this, lease, package_path, cb = std::move(cb)](ToolResult result) mutable { + CleanupShare(lease); + if (!result.ok) cb(result); + else cb(Success(Text("Agent data backup restored", "已恢复 Agent 数据备份"), package_path)); + }); + }, std::move(failure_cb)); +} + +void AgentToolsService::RunHealthCommand(const std::string& vm_id, AgentKind agent, + const std::string&, + const std::string& success_message, + ToolCallback cb) { + ToolCallback failure_cb = cb; + WithOperationShare({vm_id}, [this, vm_id, agent, success_message, cb = std::move(cb)](ShareLease lease) mutable { + std::string script_error; + if (!PrepareAgentToolScript(lease, &script_error)) { + CleanupShare(lease); + cb(Failure(script_error)); + return; + } + RunCommand(vm_id, ScriptCommand(lease.folder.tag, {"health", AgentRawValue(agent)}), 180000, + [this, lease, success_message, cb = std::move(cb)](ToolResult result) mutable { + CleanupShare(lease); + if (!result.ok) cb(result); + else cb(Success(success_message, result.output)); + }); + }, std::move(failure_cb)); +} + +void AgentToolsService::HealthStatus(const std::string& vm_id, AgentKind agent, ToolCallback cb) { + RunHealthCommand(vm_id, agent, {}, Text("Health status refreshed", "健康状态已更新"), std::move(cb)); +} + +void AgentToolsService::RunRepairCommand(const std::string& vm_id, AgentKind agent, + const std::vector& repair_args, + const std::string& success_message, + int keep_count, + ToolCallback cb) { + std::error_code ec; + fs::create_directories(BackupPackageDirectory(vm_id, agent), ec); + if (ec) { + cb(Failure(Text("Failed to create backup directory", "创建备份目录失败"), ec.message())); + return; + } + 
const std::string package = NewBackupPackagePath(vm_id, agent); + ToolCallback failure_cb = cb; + WithBackupShare(vm_id, [this, vm_id, agent, repair_args, success_message, keep_count, package, cb = std::move(cb)](ShareLease lease) mutable { + std::string script_error; + if (!PrepareAgentToolScript(lease, &script_error)) { + CleanupShare(lease); + cb(Failure(script_error)); + return; + } + const std::string guest_dir = "/mnt/shared/" + lease.folder.tag + "/" + AgentRawValue(agent); + const std::string guest_package = guest_dir + "/" + PathFilename(package); + const std::string command = WithSharedFolderReady( + lease.folder.tag, + "mkdir -p " + ShellQuote(guest_dir) + "\n" + + ScriptInvocation(lease.folder.tag, {"export-profile", AgentRawValue(agent), guest_package, "backup"}) + + ScriptInvocation(lease.folder.tag, repair_args)); + RunCommand(vm_id, command, 420000, + [this, lease, vm_id, agent, keep_count, package, success_message, cb = std::move(cb)](ToolResult result) mutable { + CleanupShare(lease); + if (!result.ok) { + cb(result); + return; + } + RotateBackups(vm_id, agent, keep_count); + cb(Success(success_message, + Text("Pre-repair backup: ", "修复前备份:") + package + "\n" + result.output)); + }); + }, std::move(failure_cb)); +} + +void AgentToolsService::RestartAgent(const std::string& vm_id, AgentKind agent, + int keep_count, ToolCallback cb) { + RunRepairCommand(vm_id, agent, {"restart", AgentRawValue(agent)}, + Text("Agent restarted", "已重新启动 Agent"), keep_count, std::move(cb)); +} + +void AgentToolsService::ResetAgentConfig(const std::string& vm_id, AgentKind agent, + int keep_count, ToolCallback cb) { + RunRepairCommand(vm_id, agent, {"reset-config", AgentRawValue(agent)}, + Text("Agent configuration reset", "已重置 Agent 配置"), keep_count, std::move(cb)); +} + +void AgentToolsService::ExportDiagnostics(const std::string& vm_id, AgentKind agent, ToolCallback cb) { + ToolCallback failure_cb = cb; + WithBackupShare(vm_id, [this, vm_id, agent, cb = 
std::move(cb)](ShareLease lease) mutable { + std::string script_error; + if (!PrepareAgentToolScript(lease, &script_error)) { + CleanupShare(lease); + cb(Failure(script_error)); + return; + } + const std::string guest_dir = "/mnt/shared/" + lease.folder.tag; + const std::string command = ScriptCommand(lease.folder.tag, + {"diagnostics", AgentRawValue(agent), guest_dir}); + RunCommand(vm_id, command, 180000, [this, lease, cb = std::move(cb)](ToolResult result) mutable { + CleanupShare(lease); + if (!result.ok) cb(result); + else cb(Success(Text("Diagnostics exported", "已导出诊断包"), result.output)); + }); + }, std::move(failure_cb)); +} + +void AgentToolsService::MigrateOpenClawToHermes(const std::string& source_vm_id, + const std::string& target_vm_id, + const MigrationOptions& options, + int keep_count, + ProgressCallback progress, + ToolCallback cb) { + std::string source_error; + if (!IsRunnable(source_vm_id, &source_error)) { + cb(Failure(Text("OpenClaw source VM ", "OpenClaw 来源 VM ") + source_error)); + return; + } + std::string target_error; + if (!IsRunnable(target_vm_id, &target_error)) { + cb(Failure(Text("Hermes target VM ", "Hermes 目标 VM ") + target_error)); + return; + } + + std::error_code ec; + fs::create_directories(BackupPackageDirectory(target_vm_id, AgentKind::kHermes), ec); + if (ec) { + cb(Failure(Text("Failed to create migration directory", "创建迁移目录失败"), ec.message())); + return; + } + const std::string backup_package = NewBackupPackagePath(target_vm_id, AgentKind::kHermes); + const std::string report_path = NewMigrationReportPath(target_vm_id); + + ToolCallback backup_failure_cb = cb; + WithBackupShare(target_vm_id, + [this, source_vm_id, target_vm_id, options, keep_count, progress, cb = std::move(cb), backup_package, report_path](ShareLease backup_lease) mutable { + std::string script_error; + if (!PrepareAgentToolScript(backup_lease, &script_error)) { + CleanupShare(backup_lease); + cb(Failure(script_error)); + return; + } + ToolCallback 
op_failure_cb = cb; + WithOperationShare({source_vm_id, target_vm_id}, + [this, source_vm_id, target_vm_id, options, keep_count, progress, cb = std::move(cb), backup_package, report_path, backup_lease](ShareLease op_lease) mutable { + auto cleanup_all = [this, backup_lease, op_lease]() { + CleanupShare(op_lease); + CleanupShare(backup_lease); + }; + std::string script_error; + if (!PrepareAgentToolScript(op_lease, &script_error)) { + cleanup_all(); + cb(Failure(script_error)); + return; + } + const std::string guest_backup_dir = "/mnt/shared/" + backup_lease.folder.tag + "/hermes"; + const std::string guest_backup = guest_backup_dir + "/" + PathFilename(backup_package); + const std::string guest_report = guest_backup_dir + "/" + PathFilename(report_path); + const std::string backup_command = WithSharedFolderReady( + backup_lease.folder.tag, + "mkdir -p " + ShellQuote(guest_backup_dir) + "\n" + + ScriptInvocation(backup_lease.folder.tag, {"export-profile", AgentRawValue(AgentKind::kHermes), guest_backup, "backup"})); + if (progress) progress("backup", Text("Creating target Hermes pre-migration backup", "正在创建目标 Hermes 迁移前备份"), PathFilename(backup_package)); + RunCommand(target_vm_id, backup_command, 420000, + [this, source_vm_id, target_vm_id, options, keep_count, progress, cb = std::move(cb), backup_package, report_path, op_lease, backup_lease, cleanup_all, guest_report](ToolResult backup_result) mutable { + if (!backup_result.ok) { + cleanup_all(); + cb(backup_result); + return; + } + const std::string archive_path = "/mnt/shared/" + op_lease.folder.tag + "/openclaw-source.tar.gz"; + const std::string export_command = ScriptCommand(op_lease.folder.tag, + {"export-openclaw-source", archive_path}); + if (progress) progress("exportSource", Text("Exporting OpenClaw user data from source VM", "正在从来源 VM 导出 OpenClaw 用户数据"), ""); + RunCommand(source_vm_id, export_command, 420000, + [this, target_vm_id, options, keep_count, progress, cb = std::move(cb), backup_package, 
report_path, archive_path, op_lease, backup_lease, cleanup_all, guest_report](ToolResult export_result) mutable { + if (!export_result.ok) { + cleanup_all(); + cb(export_result); + return; + } + const std::string dry_command = ScriptCommand(op_lease.folder.tag, + {"migrate-openclaw-dry-run", archive_path, guest_report, + SkillConflictRawValue(options.skill_conflict), options.workspace_target}); + if (progress) progress("dryRun", Text("Generating official dry-run migration plan", "正在生成官方 dry-run 迁移计划"), SkillConflictDisplayName(options.skill_conflict)); + RunCommand(target_vm_id, dry_command, 420000, + [this, target_vm_id, options, keep_count, progress, cb = std::move(cb), backup_package, report_path, archive_path, op_lease, backup_lease, cleanup_all, guest_report](ToolResult dry_result) mutable { + if (!dry_result.ok) { + cleanup_all(); + cb(dry_result); + return; + } + const std::string migrate_command = ScriptCommand(op_lease.folder.tag, + {"migrate-openclaw-apply", archive_path, guest_report, + SkillConflictRawValue(options.skill_conflict), options.workspace_target}); + if (progress) progress("migrate", Text("Dry run passed; applying migration", "dry-run 已通过,正在执行正式迁移"), PathFilename(report_path)); + RunCommand(target_vm_id, migrate_command, 600000, + [this, target_vm_id, keep_count, progress, cb = std::move(cb), backup_package, report_path, cleanup_all](ToolResult migrate_result) mutable { + cleanup_all(); + if (!migrate_result.ok) { + cb(migrate_result); + return; + } + RotateBackups(target_vm_id, AgentKind::kHermes, keep_count); + if (progress) progress("complete", Text("Migration completed; report saved", "迁移完成,报告已保存"), PathFilename(report_path)); + cb(Success(Text("OpenClaw to Hermes migration completed", "已完成 OpenClaw 到 Hermes 迁移"), + Text("Pre-migration backup: ", "迁移前备份:") + backup_package + + "\n" + Text("Migration report: ", "迁移报告:") + report_path + "\n" + migrate_result.output)); + }); + }); + }); + }); + }, + [this, backup_lease, cb = 
std::move(op_failure_cb)](ToolResult failure) mutable { + CleanupShare(backup_lease); + cb(std::move(failure)); + }); + }, + std::move(backup_failure_cb)); +} + +} // namespace agent_tools diff --git a/src/manager/agent_tools_service.h b/src/manager/agent_tools_service.h new file mode 100644 index 0000000..f7a8132 --- /dev/null +++ b/src/manager/agent_tools_service.h @@ -0,0 +1,140 @@ +#pragma once + +#include "manager/manager_service.h" + +#include +#include +#include +#include +#include + +namespace agent_tools { + +enum class AgentKind { + kHermes, + kOpenClaw, +}; + +enum class SkillConflictStrategy { + kSkip, + kOverwrite, + kRename, +}; + +struct MigrationOptions { + SkillConflictStrategy skill_conflict = SkillConflictStrategy::kSkip; + std::string workspace_target = "/home/tenbox/.hermes/workspace/openclaw-migrated"; +}; + +struct ToolResult { + bool ok = false; + std::string message; + std::string output; +}; + +struct BackupPackage { + std::string path; + std::string filename; + uint64_t size = 0; + std::chrono::system_clock::time_point modified_at{}; +}; + +struct BackupSchedule { + bool enabled = false; + int hour = 3; + int minute = 0; + int keep_count = 7; + std::string last_run_date; + std::string last_attempt_at; + std::string last_attempt_status; + std::string last_attempt_message; +}; + +using ToolCallback = std::function; +using ProgressCallback = std::function; + +const char* AgentRawValue(AgentKind agent); +const char* AgentDisplayName(AgentKind agent); +std::string SkillConflictRawValue(SkillConflictStrategy strategy); +std::string SkillConflictDisplayName(SkillConflictStrategy strategy); + +class AgentToolsService { +public: + AgentToolsService(ManagerService& manager, std::string data_dir); + + std::vector ListBackups(const std::string& vm_id, AgentKind agent) const; + void RotateBackups(const std::string& vm_id, AgentKind agent, int keep_count); + + void ExportProfile(const std::string& vm_id, AgentKind agent, + const std::string& 
destination_path, ToolCallback cb); + void ImportProfile(const std::string& vm_id, AgentKind agent, + const std::string& source_path, ToolCallback cb); + void SnapshotBackup(const std::string& vm_id, AgentKind agent, + int keep_count, ToolCallback cb); + void RestoreBackup(const std::string& vm_id, AgentKind agent, + const std::string& package_path, ToolCallback cb); + void HealthStatus(const std::string& vm_id, AgentKind agent, ToolCallback cb); + void RestartAgent(const std::string& vm_id, AgentKind agent, + int keep_count, ToolCallback cb); + void ResetAgentConfig(const std::string& vm_id, AgentKind agent, + int keep_count, ToolCallback cb); + void ExportDiagnostics(const std::string& vm_id, AgentKind agent, ToolCallback cb); + void MigrateOpenClawToHermes(const std::string& source_vm_id, + const std::string& target_vm_id, + const MigrationOptions& options, + int keep_count, + ProgressCallback progress, + ToolCallback cb); + +private: + struct ShareLease { + SharedFolder folder; + std::vector vm_ids; + std::string cleanup_dir; + }; + + using ShareCallback = std::function; + + bool IsRunnable(const std::string& vm_id, std::string* error) const; + void WithOperationShare(const std::vector& vm_ids, + ShareCallback cb, + ToolCallback failure_cb); + void WithBackupShare(const std::string& vm_id, + ShareCallback cb, + ToolCallback failure_cb); + void CleanupShare(const ShareLease& lease); + void RunCommand(const std::string& vm_id, const std::string& command, + uint32_t timeout_ms, ToolCallback cb); + void RunHealthCommand(const std::string& vm_id, AgentKind agent, + const std::string& command, + const std::string& success_message, + ToolCallback cb); + void RunRepairCommand(const std::string& vm_id, AgentKind agent, + const std::vector& repair_args, + const std::string& success_message, + int keep_count, + ToolCallback cb); + + std::string OperationBaseDirectory() const; + std::string BackupBaseDirectory(const std::string& vm_id) const; + std::string 
BackupPackageDirectory(const std::string& vm_id, AgentKind agent) const; + std::string NewBackupPackagePath(const std::string& vm_id, AgentKind agent) const; + std::string NewMigrationReportPath(const std::string& vm_id) const; + + bool PrepareAgentToolScript(const ShareLease& lease, std::string* error) const; + + static std::string ShellQuote(const std::string& value); + static std::string PathFilename(const std::string& path); + static std::string Dirname(const std::string& path); + static std::string Timestamp(); + static std::string WithSharedFolderReady(const std::string& tag, const std::string& body); + static std::string ScriptInvocation(const std::string& tag, const std::vector& args); + static std::string ScriptCommand(const std::string& tag, const std::vector& args); + + ManagerService& manager_; + std::string data_dir_; +}; + +} // namespace agent_tools diff --git a/src/manager/app_settings.cpp b/src/manager/app_settings.cpp index 3542d0d..0206b92 100644 --- a/src/manager/app_settings.cpp +++ b/src/manager/app_settings.cpp @@ -11,6 +11,7 @@ #include #include +#include #include namespace settings { @@ -156,6 +157,25 @@ AppSettings LoadSettings(const std::string& data_dir) { if (lp.contains("enable_logging") && lp["enable_logging"].is_boolean()) s.llm_proxy.enable_logging = lp["enable_logging"].get(); } + if (j.contains("agent_backups") && j["agent_backups"].is_object()) { + auto& ab = j["agent_backups"]; + if (ab.contains("schedules") && ab["schedules"].is_object()) { + for (auto it = ab["schedules"].begin(); it != ab["schedules"].end(); ++it) { + if (!it.value().is_object()) continue; + AgentBackupSchedule schedule; + auto& item = it.value(); + schedule.enabled = item.value("enabled", false); + schedule.hour = std::clamp(item.value("hour", 3), 0, 23); + schedule.minute = std::clamp(item.value("minute", 0), 0, 59); + schedule.keep_count = std::clamp(item.value("keep_count", 7), 1, 99); + schedule.last_run_date = item.value("last_run_date", ""); + 
schedule.last_attempt_at = item.value("last_attempt_at", ""); + schedule.last_attempt_status = item.value("last_attempt_status", ""); + schedule.last_attempt_message = item.value("last_attempt_message", ""); + s.agent_backup_schedules[it.key()] = std::move(schedule); + } + } + } if (j.contains("vm_paths") && j["vm_paths"].is_array()) { auto default_storage = DefaultVmStorageDir(); for (auto& item : j["vm_paths"]) { @@ -231,6 +251,27 @@ void SaveSettings(const std::string& data_dir, const AppSettings& s) { j["llm_proxy"] = lp; } + { + json schedules = json::object(); + for (const auto& [key, schedule] : s.agent_backup_schedules) { + json item; + item["enabled"] = schedule.enabled; + item["hour"] = schedule.hour; + item["minute"] = schedule.minute; + item["keep_count"] = schedule.keep_count; + if (!schedule.last_run_date.empty()) + item["last_run_date"] = schedule.last_run_date; + if (!schedule.last_attempt_at.empty()) + item["last_attempt_at"] = schedule.last_attempt_at; + if (!schedule.last_attempt_status.empty()) + item["last_attempt_status"] = schedule.last_attempt_status; + if (!schedule.last_attempt_message.empty()) + item["last_attempt_message"] = schedule.last_attempt_message; + schedules[key] = item; + } + j["agent_backups"] = {{"schedules", schedules}}; + } + auto path = fs::path(data_dir) / "settings.json"; std::ofstream ofs(path, std::ios::trunc); if (ofs) ofs << j.dump(2) << '\n'; diff --git a/src/manager/app_settings.h b/src/manager/app_settings.h index 01d895a..e2e0643 100644 --- a/src/manager/app_settings.h +++ b/src/manager/app_settings.h @@ -5,6 +5,7 @@ #include #include +#include #include namespace settings { @@ -39,6 +40,17 @@ struct LlmProxySettings { bool enable_logging = false; }; +struct AgentBackupSchedule { + bool enabled = false; + int hour = 3; + int minute = 0; + int keep_count = 7; + std::string last_run_date; + std::string last_attempt_at; + std::string last_attempt_status; + std::string last_attempt_message; +}; + struct AppSettings { 
WindowGeometry window; std::vector vm_paths; @@ -49,6 +61,7 @@ struct AppSettings { std::vector sources; // empty = use DefaultSources() std::string last_selected_source; // name of last selected source LlmProxySettings llm_proxy; + std::unordered_map agent_backup_schedules; }; // Resolve effective directories (returns custom if set, otherwise default). diff --git a/src/manager/manager_service.cpp b/src/manager/manager_service.cpp index d6a6a84..f4a0757 100644 --- a/src/manager/manager_service.cpp +++ b/src/manager/manager_service.cpp @@ -32,6 +32,7 @@ extern FILE* GetManagerLogFile(); } while (0) #include +#include #include #include @@ -80,6 +81,24 @@ std::string DecodeHex(const std::string& value) { return out; } +std::string DecodeBase64(const std::string& value) { + if (value.empty()) return {}; + DWORD needed = 0; + if (!CryptStringToBinaryA(value.c_str(), static_cast(value.size()), + CRYPT_STRING_BASE64, nullptr, &needed, nullptr, nullptr) || + needed == 0) { + return {}; + } + std::string out(needed, '\0'); + if (!CryptStringToBinaryA(value.c_str(), static_cast(value.size()), + CRYPT_STRING_BASE64, + reinterpret_cast(out.data()), &needed, nullptr, nullptr)) { + return {}; + } + out.resize(needed); + return out; +} + std::string BuildRuntimeCommand(const std::string& exe, const VmSpec& spec, const std::string& pipe, const std::vector& guest_forwards = {}) { @@ -416,19 +435,7 @@ bool ManagerService::EditVm(const std::string& vm_id, const VmMutablePatch& patc } if (running && patch.shared_folders) { - ipc::Message msg; - msg.channel = ipc::Channel::kControl; - msg.kind = ipc::Kind::kRequest; - msg.type = "runtime.update_shared_folders"; - msg.vm_id = vm_id; - msg.request_id = GetTickCount64(); - msg.fields["folder_count"] = std::to_string(vm.spec.shared_folders.size()); - for (size_t i = 0; i < vm.spec.shared_folders.size(); ++i) { - const auto& f = vm.spec.shared_folders[i]; - msg.fields["folder_" + std::to_string(i)] = - f.tag + "|" + f.host_path + "|" + 
(f.readonly ? "1" : "0"); - } - SendRuntimeMessage(vm, msg); + SendSharedFoldersUpdateLocked(vm_id, vm); } return true; @@ -863,6 +870,26 @@ bool ManagerService::SendRuntimeMessage(VmRecord& vm, const ipc::Message& msg) { return true; } +void ManagerService::SendSharedFoldersUpdateLocked(const std::string& vm_id, VmRecord& vm) { + if (vm.state != VmPowerState::kRunning) return; + std::vector folders = vm.spec.shared_folders; + folders.insert(folders.end(), vm.runtime_shared_folders.begin(), vm.runtime_shared_folders.end()); + + ipc::Message msg; + msg.channel = ipc::Channel::kControl; + msg.kind = ipc::Kind::kRequest; + msg.type = "runtime.update_shared_folders"; + msg.vm_id = vm_id; + msg.request_id = GetTickCount64(); + msg.fields["folder_count"] = std::to_string(folders.size()); + for (size_t i = 0; i < folders.size(); ++i) { + const auto& f = folders[i]; + msg.fields["folder_" + std::to_string(i)] = + f.tag + "|" + f.host_path + "|" + (f.readonly ? "1" : "0"); + } + SendRuntimeMessage(vm, msg); +} + void ManagerService::ApplyPendingPatchLocked(VmRecord& vm) { if (!vm.pending_patch) return; const auto patch = *vm.pending_patch; @@ -872,6 +899,7 @@ void ManagerService::ApplyPendingPatchLocked(VmRecord& vm) { } void ManagerService::CleanupRuntimeHandles(VmRecord& vm) { + FailPendingGuestExecForVm(vm.spec.vm_id, "VM runtime disconnected"); vm.runtime.pipe_connected = false; if (vm.runtime.process_handle) { HANDLE proc = reinterpret_cast(vm.runtime.process_handle); @@ -887,6 +915,7 @@ void ManagerService::CleanupRuntimeHandles(VmRecord& vm) { vm.runtime.process_handle = nullptr; } vm.runtime.process_id = 0; + vm.runtime_shared_folders.clear(); vm.runtime.recv_pending.clear(); vm.runtime.recv_payload_needed = 0; vm.runtime.recv_pending_msg = {}; @@ -968,6 +997,88 @@ bool ManagerService::IsGuestAgentConnected(const std::string& vm_id) const { return vm->guest_agent_connected; } +bool ManagerService::RunGuestAgentCommand(const std::string& vm_id, + const std::string& 
command, + uint32_t timeout_ms, + GuestExecCallback callback, + const std::string& user) { + if (command.empty()) { + if (callback) { + GuestExecResult result; + result.error = "missing command"; + callback(std::move(result)); + } + return false; + } + + timeout_ms = std::clamp(timeout_ms, 1000, 600000); + const uint64_t request_id = next_guest_exec_request_id_.fetch_add(1, std::memory_order_relaxed); + + { + std::lock_guard exec_lock(guest_exec_mutex_); + pending_guest_exec_[request_id] = PendingGuestExec{vm_id, std::move(callback)}; + } + + bool sent = false; + { + std::lock_guard lock(vms_mutex_); + VmRecord* vm = FindVm(vm_id); + if (vm && vm->state == VmPowerState::kRunning && + vm->runtime.pipe_connected && vm->guest_agent_connected) { + ipc::Message msg; + msg.channel = ipc::Channel::kControl; + msg.kind = ipc::Kind::kRequest; + msg.type = "runtime.guest_exec"; + msg.vm_id = vm_id; + msg.request_id = request_id; + msg.fields["command_hex"] = EncodeHex(command); + msg.fields["timeout_ms"] = std::to_string(timeout_ms); + if (!user.empty()) msg.fields["user"] = user; + sent = SendRuntimeMessage(*vm, msg); + } + } + + if (!sent) { + GuestExecCallback cb; + { + std::lock_guard exec_lock(guest_exec_mutex_); + auto it = pending_guest_exec_.find(request_id); + if (it != pending_guest_exec_.end()) { + cb = std::move(it->second.callback); + pending_guest_exec_.erase(it); + } + } + if (cb) { + GuestExecResult result; + result.error = "Guest Agent 未连接或 VM 未运行"; + cb(std::move(result)); + } + return false; + } + return true; +} + +void ManagerService::FailPendingGuestExecForVm(const std::string& vm_id, const std::string& error) { + std::vector callbacks; + { + std::lock_guard lock(guest_exec_mutex_); + for (auto it = pending_guest_exec_.begin(); it != pending_guest_exec_.end(); ) { + if (it->second.vm_id == vm_id) { + callbacks.push_back(std::move(it->second.callback)); + it = pending_guest_exec_.erase(it); + } else { + ++it; + } + } + } + for (auto& cb : callbacks) { + 
if (!cb) continue; + GuestExecResult result; + result.error = error; + cb(std::move(result)); + } +} + bool ManagerService::SendKeyEvent(const std::string& vm_id, uint32_t key_code, bool pressed) { // try_lock: these are called from WndProc which can be re-entered while // the UI thread holds vms_mutex_ (e.g. WM_ACTIVATEAPP during WaitForSingleObject). @@ -1133,6 +1244,12 @@ bool ManagerService::AddSharedFolder(const std::string& vm_id, const SharedFolde return false; } } + for (const auto& sf : vm.runtime_shared_folders) { + if (sf.tag == folder.tag) { + if (error) *error = "shared folder with tag '" + folder.tag + "' already exists"; + return false; + } + } // Check host path exists DWORD attrs = GetFileAttributesA(folder.host_path.c_str()); @@ -1145,19 +1262,7 @@ bool ManagerService::AddSharedFolder(const std::string& vm_id, const SharedFolde settings::SaveVmManifest(vm.spec); if (vm.state == VmPowerState::kRunning) { - ipc::Message msg; - msg.channel = ipc::Channel::kControl; - msg.kind = ipc::Kind::kRequest; - msg.type = "runtime.update_shared_folders"; - msg.vm_id = vm_id; - msg.request_id = GetTickCount64(); - msg.fields["folder_count"] = std::to_string(vm.spec.shared_folders.size()); - for (size_t i = 0; i < vm.spec.shared_folders.size(); ++i) { - const auto& f = vm.spec.shared_folders[i]; - msg.fields["folder_" + std::to_string(i)] = - f.tag + "|" + f.host_path + "|" + (f.readonly ? 
"1" : "0"); - } - SendRuntimeMessage(vm, msg); + SendSharedFoldersUpdateLocked(vm_id, vm); } return true; @@ -1184,19 +1289,7 @@ bool ManagerService::RemoveSharedFolder(const std::string& vm_id, const std::str settings::SaveVmManifest(vm.spec); if (vm.state == VmPowerState::kRunning) { - ipc::Message msg; - msg.channel = ipc::Channel::kControl; - msg.kind = ipc::Kind::kRequest; - msg.type = "runtime.update_shared_folders"; - msg.vm_id = vm_id; - msg.request_id = GetTickCount64(); - msg.fields["folder_count"] = std::to_string(vm.spec.shared_folders.size()); - for (size_t i = 0; i < vm.spec.shared_folders.size(); ++i) { - const auto& f = vm.spec.shared_folders[i]; - msg.fields["folder_" + std::to_string(i)] = - f.tag + "|" + f.host_path + "|" + (f.readonly ? "1" : "0"); - } - SendRuntimeMessage(vm, msg); + SendSharedFoldersUpdateLocked(vm_id, vm); } return true; @@ -1211,6 +1304,61 @@ std::vector ManagerService::GetSharedFolders(const std::string& vm return vm->spec.shared_folders; } +bool ManagerService::AddRuntimeSharedFolder(const std::string& vm_id, const SharedFolder& folder, + std::string* error) { + std::lock_guard lock(vms_mutex_); + VmRecord* vmp = FindVm(vm_id); + if (!vmp) { + if (error) *error = "vm not found"; + return false; + } + VmRecord& vm = *vmp; + if (vm.state != VmPowerState::kRunning) { + if (error) *error = "VM must be running"; + return false; + } + for (const auto& sf : vm.spec.shared_folders) { + if (sf.tag == folder.tag) { + if (error) *error = "shared folder with tag '" + folder.tag + "' already exists"; + return false; + } + } + for (const auto& sf : vm.runtime_shared_folders) { + if (sf.tag == folder.tag) { + if (error) *error = "shared folder with tag '" + folder.tag + "' already exists"; + return false; + } + } + DWORD attrs = GetFileAttributesA(folder.host_path.c_str()); + if (attrs == INVALID_FILE_ATTRIBUTES || !(attrs & FILE_ATTRIBUTE_DIRECTORY)) { + if (error) *error = "host path does not exist or is not a directory"; + return 
false; + } + vm.runtime_shared_folders.push_back(folder); + SendSharedFoldersUpdateLocked(vm_id, vm); + return true; +} + +bool ManagerService::RemoveRuntimeSharedFolder(const std::string& vm_id, const std::string& tag, + std::string* error) { + std::lock_guard lock(vms_mutex_); + VmRecord* vmp = FindVm(vm_id); + if (!vmp) { + if (error) *error = "vm not found"; + return false; + } + VmRecord& vm = *vmp; + auto it = std::find_if(vm.runtime_shared_folders.begin(), vm.runtime_shared_folders.end(), + [&tag](const SharedFolder& sf) { return sf.tag == tag; }); + if (it == vm.runtime_shared_folders.end()) { + if (error) *error = "shared folder with tag '" + tag + "' not found"; + return false; + } + vm.runtime_shared_folders.erase(it); + SendSharedFoldersUpdateLocked(vm_id, vm); + return true; +} + bool ManagerService::AddHostForward(const std::string& vm_id, const HostForward& forward, std::string* error) { std::lock_guard lock(vms_mutex_); @@ -1970,6 +2118,35 @@ void ManagerService::HandleIncomingMessage(const std::string& vm_id, const ipc:: } // Guest Agent state events + if (msg.channel == ipc::Channel::kControl && + msg.kind == ipc::Kind::kResponse && + msg.type == "runtime.guest_exec.result") { + GuestExecCallback cb; + { + std::lock_guard lock(guest_exec_mutex_); + auto it = pending_guest_exec_.find(msg.request_id); + if (it != pending_guest_exec_.end()) { + cb = std::move(it->second.callback); + pending_guest_exec_.erase(it); + } + } + if (cb) { + GuestExecResult result; + auto get = [&](const char* key) -> std::string { + auto it = msg.fields.find(key); + return it == msg.fields.end() ? 
std::string{} : it->second; + }; + result.ok = get("ok") == "true"; + const auto exit_code = get("exit_code"); + if (!exit_code.empty()) result.exit_code = std::atoi(exit_code.c_str()); + result.stdout_text = DecodeBase64(get("out_b64")); + result.stderr_text = DecodeBase64(get("err_b64")); + result.error = get("error"); + cb(std::move(result)); + } + return; + } + if (msg.channel == ipc::Channel::kControl && msg.kind == ipc::Kind::kEvent && msg.type == "guest_agent.state") { diff --git a/src/manager/manager_service.h b/src/manager/manager_service.h index ccd285c..4a458d7 100644 --- a/src/manager/manager_service.h +++ b/src/manager/manager_service.h @@ -26,6 +26,7 @@ #include #include #include +#include #include struct VmRuntimeHandle { @@ -88,6 +89,7 @@ struct VmRecord { VmPowerState state = VmPowerState::kStopped; std::optional pending_patch; VmRuntimeHandle runtime; + std::vector runtime_shared_folders; int last_exit_code = 0; bool reboot_pending = false; bool guest_agent_connected = false; @@ -196,6 +198,26 @@ class ManagerService { void SetGuestAgentStateCallback(GuestAgentStateCallback cb); bool IsGuestAgentConnected(const std::string& vm_id) const; + struct GuestExecResult { + bool ok = false; + int exit_code = -1; + std::string stdout_text; + std::string stderr_text; + std::string error; + + std::string CombinedOutput() const { + if (!stdout_text.empty() && !stderr_text.empty()) + return stdout_text + "\n" + stderr_text; + return stdout_text + stderr_text; + } + }; + using GuestExecCallback = std::function; + bool RunGuestAgentCommand(const std::string& vm_id, + const std::string& command, + uint32_t timeout_ms, + GuestExecCallback callback, + const std::string& user = "tenbox"); + // Host-forward error callback: when host ports fail to bind // failed_mappings format: "host_port:guest_port" for each failed binding using HostForwardErrorCallback = std::function GetSharedFolders(const std::string& vm_id) const; + bool AddRuntimeSharedFolder(const std::string& 
vm_id, const SharedFolder& folder, std::string* error); + bool RemoveRuntimeSharedFolder(const std::string& vm_id, const std::string& tag, std::string* error); // Host-forward management (host listens, traffic forwarded to guest). bool AddHostForward(const std::string& vm_id, const HostForward& forward, std::string* error); @@ -263,6 +287,8 @@ class ManagerService { void HandleProcessExit(const std::string& vm_id); void CleanupRuntimeHandles(VmRecord& vm); void HandleIncomingMessage(const std::string& vm_id, const ipc::Message& msg); + void SendSharedFoldersUpdateLocked(const std::string& vm_id, VmRecord& vm); + void FailPendingGuestExecForVm(const std::string& vm_id, const std::string& error); void InitJobObject(); @@ -289,6 +315,13 @@ class ManagerService { // Guest forwards injected into every VM (e.g. LLM proxy guestfwd) std::vector global_guest_forwards_; + struct PendingGuestExec { + std::string vm_id; + GuestExecCallback callback; + }; + std::mutex guest_exec_mutex_; + std::unordered_map pending_guest_exec_; + std::atomic next_guest_exec_request_id_{1}; void AppendGuestFwdFields(ipc::Message& msg, const std::vector& vm_guest_forwards = {}) const; diff --git a/src/manager/ui/agent_tools_dialog.cpp b/src/manager/ui/agent_tools_dialog.cpp new file mode 100644 index 0000000..07b09e4 --- /dev/null +++ b/src/manager/ui/agent_tools_dialog.cpp @@ -0,0 +1,366 @@ +#include "manager/ui/agent_tools_dialog.h" + +#include "manager/agent_tools_service.h" +#include "manager/app_settings.h" +#include "manager/i18n.h" +#include "manager/ui/dlg_builder.h" + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +namespace { + +using agent_tools::AgentKind; +using agent_tools::MigrationOptions; +using agent_tools::SkillConflictStrategy; + +enum Id { + IDC_AGENT_KIND = 2101, + IDC_HEALTH, + IDC_BACKUP, + IDC_RESTORE, + IDC_EXPORT, + IDC_IMPORT, + IDC_RESTART, + IDC_RESET, + IDC_DIAG, + IDC_SOURCE_VM, + IDC_STRATEGY, + 
IDC_WORKSPACE, + IDC_MIGRATE, + IDC_SCHEDULE_ENABLED, + IDC_SCHEDULE_TIME, + IDC_SCHEDULE_KEEP, + IDC_SCHEDULE_SAVE, + IDC_OPEN_BACKUPS, + IDC_OUTPUT +}; + +constexpr UINT WM_AGENT_RESULT = WM_APP + 71; +constexpr UINT WM_AGENT_PROGRESS = WM_APP + 72; + +struct VmChoice { + std::string id; + std::string name; +}; + +struct PostedResult { + agent_tools::ToolResult result; +}; + +struct PostedProgress { + std::string step; + std::string message; + std::string detail; +}; + +std::string Text(const char* en, const char* zh) { + return i18n::GetCurrentLanguage() == i18n::Lang::kChineseSimplified ? zh : en; +} + +struct DialogData { + ManagerService& manager; + agent_tools::AgentToolsService tools; + std::string vm_id; + std::vector source_vms; + bool busy = false; + + DialogData(ManagerService& mgr, std::string id) + : manager(mgr), tools(mgr, mgr.data_dir()), vm_id(std::move(id)) {} +}; + +std::string ScheduleKey(const std::string& vm_id, AgentKind agent) { + return vm_id + "|" + agent_tools::AgentRawValue(agent); +} + +AgentKind SelectedAgent(HWND dlg) { + int idx = static_cast(SendDlgItemMessageW(dlg, IDC_AGENT_KIND, CB_GETCURSEL, 0, 0)); + return idx == 1 ? AgentKind::kOpenClaw : AgentKind::kHermes; +} + +void AppendOutput(HWND dlg, const std::string& text) { + HWND out = GetDlgItem(dlg, IDC_OUTPUT); + int len = GetWindowTextLengthW(out); + std::wstring w = i18n::to_wide(text + "\r\n"); + SendMessageW(out, EM_SETSEL, len, len); + SendMessageW(out, EM_REPLACESEL, FALSE, reinterpret_cast(w.c_str())); +} + +void SetBusy(HWND dlg, DialogData* data, bool busy) { + data->busy = busy; + for (int id : {IDC_HEALTH, IDC_BACKUP, IDC_RESTORE, IDC_EXPORT, IDC_IMPORT, + IDC_RESTART, IDC_RESET, IDC_DIAG, IDC_MIGRATE, IDC_SCHEDULE_SAVE}) { + EnableWindow(GetDlgItem(dlg, id), busy ? 
FALSE : TRUE); + } +} + +std::string SaveFileDialog(HWND dlg, const std::string& filename) { + wchar_t file_buf[MAX_PATH]{}; + MultiByteToWideChar(CP_UTF8, 0, filename.c_str(), -1, file_buf, MAX_PATH); + static constexpr wchar_t filter[] = L"Agent Profile (*.tar.gz)\0*.tar.gz\0All Files\0*.*\0\0"; + OPENFILENAMEW ofn{}; + ofn.lStructSize = sizeof(ofn); + ofn.hwndOwner = dlg; + ofn.lpstrFilter = filter; + ofn.lpstrFile = file_buf; + ofn.nMaxFile = MAX_PATH; + ofn.Flags = OFN_OVERWRITEPROMPT | OFN_PATHMUSTEXIST; + return GetSaveFileNameW(&ofn) ? i18n::wide_to_utf8(file_buf) : std::string{}; +} + +std::string OpenProfileDialog(HWND dlg) { + return BrowseForFile(dlg, "Agent Profile (*.tar.gz)\0*.tar.gz\0All Files\0*.*\0\0", ""); +} + +int ScheduleKeep(HWND dlg) { + wchar_t buf[16]{}; + GetDlgItemTextW(dlg, IDC_SCHEDULE_KEEP, buf, 16); + int v = _wtoi(buf); + return std::clamp(v, 1, 99); +} + +void LoadSchedule(HWND dlg, DialogData* data) { + auto agent = SelectedAgent(dlg); + auto key = ScheduleKey(data->vm_id, agent); + settings::AgentBackupSchedule schedule; + auto it = data->manager.app_settings().agent_backup_schedules.find(key); + if (it != data->manager.app_settings().agent_backup_schedules.end()) { + schedule = it->second; + } + CheckDlgButton(dlg, IDC_SCHEDULE_ENABLED, schedule.enabled ? 
BST_CHECKED : BST_UNCHECKED); + wchar_t time_buf[16]{}; + swprintf_s(time_buf, L"%02d:%02d", schedule.hour, schedule.minute); + SetDlgItemTextW(dlg, IDC_SCHEDULE_TIME, time_buf); + SetDlgItemTextW(dlg, IDC_SCHEDULE_KEEP, i18n::to_wide(std::to_string(schedule.keep_count)).c_str()); +} + +void SaveSchedule(HWND dlg, DialogData* data) { + wchar_t time_buf[32]{}; + GetDlgItemTextW(dlg, IDC_SCHEDULE_TIME, time_buf, 32); + int hour = 3, minute = 0; + swscanf_s(time_buf, L"%d:%d", &hour, &minute); + settings::AgentBackupSchedule schedule; + schedule.enabled = IsDlgButtonChecked(dlg, IDC_SCHEDULE_ENABLED) == BST_CHECKED; + schedule.hour = std::clamp(hour, 0, 23); + schedule.minute = std::clamp(minute, 0, 59); + schedule.keep_count = ScheduleKeep(dlg); + data->manager.app_settings().agent_backup_schedules[ScheduleKey(data->vm_id, SelectedAgent(dlg))] = schedule; + data->manager.SaveAppSettings(); + data->tools.RotateBackups(data->vm_id, SelectedAgent(dlg), schedule.keep_count); + AppendOutput(dlg, Text("Scheduled backup settings saved", "定时备份设置已保存")); +} + +void RefreshSources(HWND dlg, DialogData* data) { + data->source_vms.clear(); + HWND combo = GetDlgItem(dlg, IDC_SOURCE_VM); + SendMessageW(combo, CB_RESETCONTENT, 0, 0); + for (const auto& rec : data->manager.ListVms()) { + if (rec.spec.vm_id == data->vm_id) continue; + if (rec.state != VmPowerState::kRunning || !rec.guest_agent_connected) continue; + data->source_vms.push_back({rec.spec.vm_id, rec.spec.name}); + SendMessageW(combo, CB_ADDSTRING, 0, reinterpret_cast(i18n::to_wide(rec.spec.name).c_str())); + } + if (!data->source_vms.empty()) SendMessageW(combo, CB_SETCURSEL, 0, 0); +} + +void StartOp(HWND dlg, DialogData* data, const std::string& label, + std::function run) { + if (data->busy) return; + SetBusy(dlg, data, true); + AppendOutput(dlg, Text("Start: ", "开始:") + label); + run([dlg](agent_tools::ToolResult result) { + PostMessageW(dlg, WM_AGENT_RESULT, 0, reinterpret_cast(new PostedResult{std::move(result)})); 
+ }); +} + +void OpenBackups(HWND dlg, DialogData* data) { + std::filesystem::path dir = std::filesystem::path(data->manager.data_dir()) / + "AgentBackups" / data->vm_id / agent_tools::AgentRawValue(SelectedAgent(dlg)); + std::error_code ec; + std::filesystem::create_directories(dir, ec); + ShellExecuteW(dlg, L"open", i18n::to_wide(dir.string()).c_str(), nullptr, nullptr, SW_SHOWNORMAL); +} + +void InitDialog(HWND dlg, DialogData* data) { + CenterDialogToParent(dlg); + HWND agent = GetDlgItem(dlg, IDC_AGENT_KIND); + SendMessageW(agent, CB_ADDSTRING, 0, reinterpret_cast(L"Hermes")); + SendMessageW(agent, CB_ADDSTRING, 0, reinterpret_cast(L"OpenClaw")); + SendMessageW(agent, CB_SETCURSEL, 0, 0); + + HWND strategy = GetDlgItem(dlg, IDC_STRATEGY); + SendMessageW(strategy, CB_ADDSTRING, 0, reinterpret_cast(i18n::to_wide(Text("Keep Hermes skills", "技能保留 Hermes")).c_str())); + SendMessageW(strategy, CB_ADDSTRING, 0, reinterpret_cast(i18n::to_wide(Text("Overwrite Hermes skills", "技能覆盖 Hermes")).c_str())); + SendMessageW(strategy, CB_ADDSTRING, 0, reinterpret_cast(i18n::to_wide(Text("Rename imported skills", "技能重命名导入")).c_str())); + SendMessageW(strategy, CB_SETCURSEL, 0, 0); + SetDlgItemTextW(dlg, IDC_WORKSPACE, L"/home/tenbox/.hermes/workspace/openclaw-migrated"); + SendDlgItemMessageW(dlg, IDC_OUTPUT, EM_SETLIMITTEXT, 1024 * 1024, 0); + RefreshSources(dlg, data); + LoadSchedule(dlg, data); +} + +INT_PTR CALLBACK Proc(HWND dlg, UINT msg, WPARAM wp, LPARAM lp) { + auto* data = reinterpret_cast(GetWindowLongPtrW(dlg, DWLP_USER)); + switch (msg) { + case WM_INITDIALOG: + data = reinterpret_cast(lp); + SetWindowLongPtrW(dlg, DWLP_USER, reinterpret_cast(data)); + InitDialog(dlg, data); + return TRUE; + + case WM_AGENT_PROGRESS: { + std::unique_ptr p(reinterpret_cast(lp)); + AppendOutput(dlg, p->message + (p->detail.empty() ? 
"" : " - " + p->detail)); + return TRUE; + } + + case WM_AGENT_RESULT: { + std::unique_ptr r(reinterpret_cast(lp)); + SetBusy(dlg, data, false); + AppendOutput(dlg, std::string(r->result.ok ? Text("Done: ", "完成:") : Text("Failed: ", "失败:")) + r->result.message); + if (!r->result.output.empty()) AppendOutput(dlg, r->result.output); + RefreshSources(dlg, data); + return TRUE; + } + + case WM_COMMAND: { + const int id = LOWORD(wp); + if (id == IDCANCEL) { + if (data && data->busy) { + AppendOutput(dlg, Text("Operation is running. Please wait for it to finish before closing.", "操作执行中,请等待完成后关闭。")); + return TRUE; + } + EndDialog(dlg, 0); + return TRUE; + } + if (id == IDC_AGENT_KIND && HIWORD(wp) == CBN_SELCHANGE) { + LoadSchedule(dlg, data); + return TRUE; + } + const AgentKind agent = SelectedAgent(dlg); + const int keep = ScheduleKeep(dlg); + switch (id) { + case IDC_HEALTH: + StartOp(dlg, data, Text("Run diagnosis", "一键诊断"), [=](auto cb) { data->tools.HealthStatus(data->vm_id, agent, cb); }); + return TRUE; + case IDC_BACKUP: + StartOp(dlg, data, Text("Back Up Now", "立即备份"), [=](auto cb) { data->tools.SnapshotBackup(data->vm_id, agent, keep, cb); }); + return TRUE; + case IDC_RESTORE: { + auto backups = data->tools.ListBackups(data->vm_id, agent); + if (backups.empty()) { + AppendOutput(dlg, Text("No restorable backup was found", "没有找到可恢复的备份")); + return TRUE; + } + if (MessageBoxW(dlg, i18n::to_wide(Text("Restore will overwrite current Agent data. 
Restore latest backup?", "恢复会覆盖当前 Agent 数据,确认恢复最新备份?")).c_str(), i18n::to_wide(Text("Confirm restore", "确认恢复")).c_str(), MB_OKCANCEL | MB_ICONWARNING) == IDOK) { + StartOp(dlg, data, Text("Restore Latest Backup", "恢复最新备份"), [=](auto cb) { data->tools.RestoreBackup(data->vm_id, agent, backups.front().path, cb); }); + } + return TRUE; + } + case IDC_EXPORT: { + std::string path = SaveFileDialog(dlg, std::string(agent_tools::AgentRawValue(agent)) + "-profile.tar.gz"); + if (!path.empty()) StartOp(dlg, data, Text("Export Migration Package", "导出迁移包"), [=](auto cb) { data->tools.ExportProfile(data->vm_id, agent, path, cb); }); + return TRUE; + } + case IDC_IMPORT: { + std::string path = OpenProfileDialog(dlg); + if (!path.empty() && MessageBoxW(dlg, i18n::to_wide(Text("Import will replace current Agent data. Continue?", "导入会替换当前 Agent 数据,确认继续?")).c_str(), i18n::to_wide(Text("Confirm import", "确认导入")).c_str(), MB_OKCANCEL | MB_ICONWARNING) == IDOK) { + StartOp(dlg, data, Text("Import Migration Package", "导入迁移包"), [=](auto cb) { data->tools.ImportProfile(data->vm_id, agent, path, cb); }); + } + return TRUE; + } + case IDC_RESTART: + StartOp(dlg, data, Text("Restart", "重启服务"), [=](auto cb) { data->tools.RestartAgent(data->vm_id, agent, keep, cb); }); + return TRUE; + case IDC_RESET: + if (MessageBoxW(dlg, i18n::to_wide(Text("Reset will overwrite current Agent model configuration. 
Continue?", "重置会覆盖当前 Agent 模型配置,确认继续?")).c_str(), i18n::to_wide(Text("Confirm reset", "确认重置")).c_str(), MB_OKCANCEL | MB_ICONWARNING) == IDOK) { + StartOp(dlg, data, Text("Reset Config", "重置配置"), [=](auto cb) { data->tools.ResetAgentConfig(data->vm_id, agent, keep, cb); }); + } + return TRUE; + case IDC_DIAG: + StartOp(dlg, data, Text("Export Diagnostics", "导出诊断包"), [=](auto cb) { data->tools.ExportDiagnostics(data->vm_id, agent, cb); }); + return TRUE; + case IDC_MIGRATE: { + int sel = static_cast(SendDlgItemMessageW(dlg, IDC_SOURCE_VM, CB_GETCURSEL, 0, 0)); + if (sel < 0 || sel >= static_cast(data->source_vms.size())) { + AppendOutput(dlg, Text("Select a running OpenClaw source VM first", "请先选择运行中的 OpenClaw 来源 VM")); + return TRUE; + } + if (MessageBoxW(dlg, i18n::to_wide(Text("Migration will back up target Hermes, then run dry-run and apply. Continue?", "迁移会先备份目标 Hermes,再执行 dry-run 和正式迁移。确认继续?")).c_str(), i18n::to_wide(Text("Confirm migration", "确认迁移")).c_str(), MB_OKCANCEL | MB_ICONWARNING) != IDOK) + return TRUE; + wchar_t workspace[512]{}; + GetDlgItemTextW(dlg, IDC_WORKSPACE, workspace, 512); + int st = static_cast(SendDlgItemMessageW(dlg, IDC_STRATEGY, CB_GETCURSEL, 0, 0)); + MigrationOptions options; + options.workspace_target = i18n::wide_to_utf8(workspace); + options.skill_conflict = st == 1 ? SkillConflictStrategy::kOverwrite : + st == 2 ? 
SkillConflictStrategy::kRename : + SkillConflictStrategy::kSkip; + std::string source_id = data->source_vms[sel].id; + StartOp(dlg, data, Text("OpenClaw to Hermes Migration", "OpenClaw 到 Hermes 迁移"), [=](auto cb) { + data->tools.MigrateOpenClawToHermes(source_id, data->vm_id, options, keep, + [dlg](const std::string& step, const std::string& message, const std::string& detail) { + PostMessageW(dlg, WM_AGENT_PROGRESS, 0, reinterpret_cast(new PostedProgress{step, message, detail})); + }, + cb); + }); + return TRUE; + } + case IDC_SCHEDULE_SAVE: + SaveSchedule(dlg, data); + return TRUE; + case IDC_OPEN_BACKUPS: + OpenBackups(dlg, data); + return TRUE; + } + break; + } + } + return FALSE; +} + +} // namespace + +void ShowAgentToolsDialog(HWND parent, ManagerService& mgr, const std::string& vm_id) { + DlgBuilder b; + b.Begin(Text("Agent Toolbox", "Agent 急救箱").c_str(), 0, 0, 610, 430, WS_CAPTION | WS_SYSMENU); + b.AddStatic(-1, "Agent:", 12, 12, 45, 12); + b.AddComboBox(IDC_AGENT_KIND, 60, 10, 110, 80); + b.AddButton(IDC_HEALTH, Text("Run diagnosis", "一键诊断").c_str(), 185, 9, 72, 16); + b.AddButton(IDC_BACKUP, Text("Back Up Now", "立即备份").c_str(), 262, 9, 72, 16); + b.AddButton(IDC_RESTORE, Text("Restore Latest", "恢复最新").c_str(), 339, 9, 72, 16); + b.AddButton(IDC_OPEN_BACKUPS, Text("Open Backups", "打开备份").c_str(), 416, 9, 72, 16); + + b.AddButton(IDC_EXPORT, Text("Export", "导出包").c_str(), 12, 38, 66, 16); + b.AddButton(IDC_IMPORT, Text("Import", "导入包").c_str(), 84, 38, 66, 16); + b.AddButton(IDC_RESTART, Text("Restart", "重启服务").c_str(), 156, 38, 72, 16); + b.AddButton(IDC_RESET, Text("Reset Config", "重置配置").c_str(), 234, 38, 72, 16); + b.AddButton(IDC_DIAG, Text("Diagnostics", "导出诊断").c_str(), 312, 38, 72, 16); + + b.AddCheckBox(IDC_SCHEDULE_ENABLED, Text("Scheduled", "定时备份").c_str(), 400, 39, 70, 14); + b.AddEdit(IDC_SCHEDULE_TIME, 472, 38, 45, 15); + b.AddStatic(-1, Text("Keep", "保留").c_str(), 522, 40, 24, 12); + b.AddEdit(IDC_SCHEDULE_KEEP, 548, 38, 24, 15); + 
b.AddButton(IDC_SCHEDULE_SAVE, Text("Save", "保存").c_str(), 576, 38, 28, 16); + + b.AddStatic(-1, Text("Migrate OpenClaw to this Hermes:", "OpenClaw 迁移到当前 Hermes:").c_str(), 12, 72, 150, 12); + b.AddComboBox(IDC_SOURCE_VM, 165, 69, 120, 100); + b.AddComboBox(IDC_STRATEGY, 292, 69, 120, 100); + b.AddEdit(IDC_WORKSPACE, 418, 69, 115, 15); + b.AddButton(IDC_MIGRATE, Text("Migrate", "自动迁移").c_str(), 540, 68, 58, 17); + + b.AddEdit(IDC_OUTPUT, 12, 98, 586, 300, ES_MULTILINE | ES_AUTOVSCROLL | ES_READONLY | WS_VSCROLL); + b.AddButton(IDCANCEL, Text("Close", "关闭").c_str(), 540, 405, 58, 17); + + DialogData data(mgr, vm_id); + DialogBoxIndirectParamW(GetModuleHandleW(nullptr), b.Build(), parent, Proc, reinterpret_cast(&data)); +} diff --git a/src/manager/ui/agent_tools_dialog.h b/src/manager/ui/agent_tools_dialog.h new file mode 100644 index 0000000..4d73bc3 --- /dev/null +++ b/src/manager/ui/agent_tools_dialog.h @@ -0,0 +1,11 @@ +#pragma once + +#include "manager/manager_service.h" + +#define NOMINMAX +#define WIN32_LEAN_AND_MEAN +#include + +#include + +void ShowAgentToolsDialog(HWND parent, ManagerService& mgr, const std::string& vm_id); diff --git a/src/manager/ui/win32_ui_shell.cpp b/src/manager/ui/win32_ui_shell.cpp index c0b25cb..6deb91c 100644 --- a/src/manager/ui/win32_ui_shell.cpp +++ b/src/manager/ui/win32_ui_shell.cpp @@ -3,12 +3,14 @@ #include "manager/ui/create_vm_dialog.h" #include "manager/ui/settings_dialog.h" #include "manager/ui/llm_proxy_dialog.h" +#include "manager/ui/agent_tools_dialog.h" #include "manager/ui/win32_display_panel.h" #include "manager/ui/info_tab.h" #include "manager/ui/console_tab.h" #include "manager/ui/vm_listview.h" #include "manager/i18n.h" #include "manager/app_settings.h" +#include "manager/agent_tools_service.h" #include "manager/resource.h" #include "version.h" @@ -32,7 +34,9 @@ #include #include #include +#include #include +#include #include #include #include @@ -61,6 +65,7 @@ enum CmdId : UINT { IDM_LLM_PROXY = 1027, 
IDM_HELP_DOC = 1028, IDM_TRAY_TOGGLE = 1029, + IDM_AGENT_TOOLS = 1030, }; // ── Control IDs ── @@ -159,6 +164,7 @@ struct Win32UiShell::Impl { UINT_PTR resize_timer_id = 0; static constexpr UINT kResizeTimerId = 9001; static constexpr UINT kResizeDebounceMs = 500; + static constexpr UINT kAgentBackupTimerId = 9002; HFONT ui_font = nullptr; HFONT mono_font = nullptr; @@ -202,6 +208,7 @@ struct Win32UiShell::Impl { std::unordered_map vm_ui_states; std::unordered_map> audio_players; + std::set scheduled_agent_backups_running; VmUiState& GetVmUiState(const std::string& vm_id) { return vm_ui_states[vm_id]; @@ -330,6 +337,7 @@ static HMENU BuildMenuBar(bool show_toolbar) { AppendMenuW(vm_menu, MF_SEPARATOR, 0, nullptr); AppendMenuW(vm_menu, MF_STRING, IDM_SHARED_FOLDERS, i18n::tr_w(S::kToolbarSharedFolders).c_str()); AppendMenuW(vm_menu, MF_STRING, IDM_PORT_FORWARDS, i18n::tr_w(S::kMenuPortForwards).c_str()); + AppendMenuW(vm_menu, MF_STRING, IDM_AGENT_TOOLS, i18n::to_wide(i18n::GetCurrentLanguage() == i18n::Lang::kChineseSimplified ? "Agent 急救箱..." : "Agent Toolbox...").c_str()); AppendMenuW(bar, MF_POPUP, reinterpret_cast(vm_menu), i18n::tr_w(S::kMenuVm).c_str()); HMENU view_menu = CreatePopupMenu(); @@ -764,6 +772,7 @@ static void UpdateCommandStates(Impl* p) { EnableCmd(IDM_DELETE, has_sel && !running); EnableCmd(IDM_SHARED_FOLDERS, has_sel); EnableCmd(IDM_PORT_FORWARDS, has_sel); + EnableCmd(IDM_AGENT_TOOLS, has_sel && running && ga_ok); SendMessage(p->toolbar, TB_ENABLEBUTTON, IDM_DPI_ZOOM, MAKELONG((has_sel && p->dpi != 96) ? 
TRUE : FALSE, 0)); @@ -825,6 +834,9 @@ static LRESULT CALLBACK MainWndProc(HWND hwnd, UINT msg, WPARAM wp, LPARAM lp) { } } } + if (p && wp == Impl::kAgentBackupTimerId) { + shell->RunDueAgentBackups(); + } return 0; case WM_COMMAND: { @@ -1000,6 +1012,15 @@ static LRESULT CALLBACK MainWndProc(HWND hwnd, UINT msg, WPARAM wp, LPARAM lp) { shell->RefreshVmList(); return 0; } + case IDM_AGENT_TOOLS: { + if (p->selected_index < 0 || + p->selected_index >= static_cast(p->records.size())) + break; + const std::string& vm_id = p->records[p->selected_index].spec.vm_id; + ShowAgentToolsDialog(hwnd, shell->manager_, vm_id); + shell->RefreshVmList(); + return 0; + } case IDM_VIEW_TOOLBAR: { auto& show = shell->manager_.app_settings().show_toolbar; show = !show; @@ -1444,6 +1465,68 @@ static LRESULT CALLBACK MainWndProc(HWND hwnd, UINT msg, WPARAM wp, LPARAM lp) { // ── Lifetime ── +static std::string AgentBackupDateKey(const SYSTEMTIME& st) { + char buf[16]{}; + snprintf(buf, sizeof(buf), "%04u-%02u-%02u", st.wYear, st.wMonth, st.wDay); + return buf; +} + +static std::string AgentBackupTimestamp(const SYSTEMTIME& st) { + char buf[32]{}; + snprintf(buf, sizeof(buf), "%04u-%02u-%02u %02u:%02u", + st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute); + return buf; +} + +void Win32UiShell::RunDueAgentBackups() { + SYSTEMTIME now{}; + GetLocalTime(&now); + const std::string today = AgentBackupDateKey(now); + const int now_minutes = static_cast(now.wHour) * 60 + static_cast(now.wMinute); + + std::vector> due; + for (const auto& [key, schedule] : manager_.app_settings().agent_backup_schedules) { + if (!schedule.enabled || schedule.last_run_date == today) continue; + if (now_minutes < schedule.hour * 60 + schedule.minute) continue; + if (impl_->scheduled_agent_backups_running.count(key)) continue; + due.push_back({key, schedule}); + } + + for (const auto& [key, schedule] : due) { + const auto sep = key.find('|'); + if (sep == std::string::npos) continue; + const std::string vm_id = 
key.substr(0, sep); + const std::string agent_name = key.substr(sep + 1); + const auto agent = agent_name == "openclaw" + ? agent_tools::AgentKind::kOpenClaw + : agent_tools::AgentKind::kHermes; + auto vm = manager_.GetVm(vm_id); + if (!vm || vm->state != VmPowerState::kRunning || !vm->guest_agent_connected) { + auto& s = manager_.app_settings().agent_backup_schedules[key]; + s.last_attempt_at = AgentBackupTimestamp(now); + s.last_attempt_status = "failed"; + s.last_attempt_message = !vm ? "VM 不存在" : "VM 未运行或 Guest Agent 未连接"; + manager_.SaveAppSettings(); + continue; + } + + impl_->scheduled_agent_backups_running.insert(key); + auto tools = std::make_shared(manager_, manager_.data_dir()); + tools->SnapshotBackup(vm_id, agent, schedule.keep_count, + [this, key, today, now, tools](agent_tools::ToolResult result) { + InvokeOnUiThread([this, key, today, now, result = std::move(result)]() mutable { + impl_->scheduled_agent_backups_running.erase(key); + auto& s = manager_.app_settings().agent_backup_schedules[key]; + s.last_attempt_at = AgentBackupTimestamp(now); + s.last_attempt_status = result.ok ? "success" : "failed"; + s.last_attempt_message = result.ok ? 
"成功" : result.message; + s.last_run_date = today; + manager_.SaveAppSettings(); + }); + }); + } +} + Win32UiShell::Win32UiShell(ManagerService& manager) : manager_(manager), impl_(std::make_unique()) @@ -1780,6 +1863,8 @@ Win32UiShell::Win32UiShell(ManagerService& manager) RefreshVmList(); LayoutControls(impl_.get()); + SetTimer(impl_->hwnd, Impl::kAgentBackupTimerId, 60 * 1000, nullptr); + RunDueAgentBackups(); } Win32UiShell::~Win32UiShell() { @@ -1813,6 +1898,7 @@ Win32UiShell::~Win32UiShell() { impl_->tray_added = false; } if (impl_->hwnd) { + KillTimer(impl_->hwnd, Impl::kAgentBackupTimerId); RemoveClipboardFormatListener(impl_->hwnd); } if (impl_->ui_font) DeleteObject(impl_->ui_font); diff --git a/src/manager/ui/win32_ui_shell.h b/src/manager/ui/win32_ui_shell.h index a5e127f..a37dc6b 100644 --- a/src/manager/ui/win32_ui_shell.h +++ b/src/manager/ui/win32_ui_shell.h @@ -16,6 +16,7 @@ class Win32UiShell { void Run(); void Quit(); void RefreshVmList(); + void RunDueAgentBackups(); static void InvokeOnUiThread(std::function fn); static void SetClipboardFromVm(bool value); diff --git a/src/runtime/runtime_service.cpp b/src/runtime/runtime_service.cpp index 9847422..15f7c3b 100644 --- a/src/runtime/runtime_service.cpp +++ b/src/runtime/runtime_service.cpp @@ -505,7 +505,15 @@ void RuntimeControlService::AttachVm(Vm* vm) { if (vm_) { console_port_->SetInputCallback([vm](const uint8_t* data, size_t size) { - vm->InjectConsoleBytes(data, size); + static constexpr size_t kConsoleInputChunk = 64; + for (size_t off = 0; off < size; off += kConsoleInputChunk) { + size_t remaining = size - off; + size_t chunk = remaining < kConsoleInputChunk ? 
remaining : kConsoleInputChunk; + vm->InjectConsoleBytes(data + off, chunk); + if (off + chunk < size) { + std::this_thread::sleep_for(std::chrono::milliseconds(2)); + } + } }); input_port_->SetKeyEventCallback([vm](const KeyboardEvent& ev) { @@ -585,6 +593,87 @@ bool RuntimeControlService::SendWithPayload(const ipc::Message& message) { } void RuntimeControlService::HandleMessage(const ipc::Message& message) { + if (message.channel == ipc::Channel::kControl && + message.kind == ipc::Kind::kRequest && + message.type == "runtime.guest_exec") { + ipc::Message resp; + resp.kind = ipc::Kind::kResponse; + resp.channel = ipc::Channel::kControl; + resp.type = "runtime.guest_exec.result"; + resp.vm_id = vm_id_; + resp.request_id = message.request_id; + + std::string command; + auto it_hex = message.fields.find("command_hex"); + if (it_hex != message.fields.end()) { + auto command_bytes = DecodeHex(it_hex->second); + command.assign(command_bytes.begin(), command_bytes.end()); + } else { + auto it = message.fields.find("command"); + if (it != message.fields.end()) { + command = it->second; + } + } + if (command.empty()) { + resp.fields["ok"] = "false"; + resp.fields["error"] = "missing command"; + Send(resp); + return; + } + if (!vm_ || !vm_->GetGuestAgentHandler() || !vm_->IsGuestAgentConnected()) { + resp.fields["ok"] = "false"; + resp.fields["error"] = "guest agent not connected"; + Send(resp); + return; + } + + int timeout_ms = 120000; + auto it_timeout = message.fields.find("timeout_ms"); + if (it_timeout != message.fields.end()) { + auto [p, ec] = std::from_chars( + it_timeout->second.data(), + it_timeout->second.data() + it_timeout->second.size(), + timeout_ms); + if (ec != std::errc{} || timeout_ms <= 0) { + timeout_ms = 120000; + } + } + timeout_ms = std::clamp(timeout_ms, 1000, 600000); + + const uint64_t req_id = message.request_id; + std::string user; + auto it_user = message.fields.find("user"); + if (it_user != message.fields.end()) { + user = it_user->second; + 
} + bool started = vm_->GetGuestAgentHandler()->RunShellCommand( + command, + user, + std::chrono::milliseconds(timeout_ms), + [this, req_id](GuestAgentHandler::ExecResult result) { + ipc::Message exec_resp; + exec_resp.kind = ipc::Kind::kResponse; + exec_resp.channel = ipc::Channel::kControl; + exec_resp.type = "runtime.guest_exec.result"; + exec_resp.vm_id = vm_id_; + exec_resp.request_id = req_id; + exec_resp.fields["ok"] = result.ok ? "true" : "false"; + exec_resp.fields["exit_code"] = std::to_string(result.exit_code); + exec_resp.fields["out_b64"] = result.out_data; + exec_resp.fields["err_b64"] = result.err_data; + if (!result.error.empty()) { + exec_resp.fields["error"] = result.error; + } + Send(exec_resp); + }); + if (!started) { + resp.fields["ok"] = "false"; + resp.fields["error"] = "failed to start guest command"; + Send(resp); + } + return; + } + if (message.channel == ipc::Channel::kControl && message.kind == ipc::Kind::kRequest && message.type == "runtime.command") { @@ -596,14 +685,23 @@ void RuntimeControlService::HandleMessage(const ipc::Message& message) { resp.request_id = message.request_id; resp.fields["ok"] = "true"; - auto it = message.fields.find("command"); - if (it == message.fields.end()) { + std::string cmd; + auto it_hex = message.fields.find("command_hex"); + if (it_hex != message.fields.end()) { + auto command_bytes = DecodeHex(it_hex->second); + cmd.assign(command_bytes.begin(), command_bytes.end()); + } else { + auto it = message.fields.find("command"); + if (it != message.fields.end()) { + cmd = it->second; + } + } + if (cmd.empty()) { resp.fields["ok"] = "false"; resp.fields["error"] = "missing command"; Send(resp); return; } - const std::string& cmd = it->second; if (cmd == "stop") { if (vm_) vm_->RequestStop(); } else if (cmd == "shutdown") { diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 01de478..132eec0 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -15,3 +15,11 @@ 
target_include_directories(test_qcow2 PRIVATE ) target_link_libraries(test_qcow2 PRIVATE zlibstatic libzstd_static) + +find_program(SH_EXECUTABLE sh REQUIRED) +add_test( + NAME test_agent_tools_guest + COMMAND ${SH_EXECUTABLE} + ${CMAKE_CURRENT_SOURCE_DIR}/test_agent_tools_guest.sh + ${CMAKE_SOURCE_DIR}/src/agent_tools/guest/agent_tools.sh +) diff --git a/tests/test_agent_tools_guest.sh b/tests/test_agent_tools_guest.sh new file mode 100644 index 0000000..776f089 --- /dev/null +++ b/tests/test_agent_tools_guest.sh @@ -0,0 +1,321 @@ +#!/bin/sh +set -eu + +script="${1:-}" +[ -n "$script" ] && [ -f "$script" ] || { echo "Usage: $0 PATH_TO_AGENT_TOOLS_SH" >&2; exit 2; } +command -v python3 >/dev/null 2>&1 || { echo "python3 is required" >&2; exit 2; } + +tmp="$(mktemp -d)" +trap 'rm -rf "$tmp"' EXIT + +real_python="$(command -v python3)" +real_tar="$(command -v tar)" +real_mv="$(command -v mv)" +fakebin="$tmp/bin" +mkdir -p "$fakebin" +cat > "$fakebin/uname" <<'EOF' +#!/bin/sh +echo Linux +EOF +cat > "$fakebin/python3" </dev/null 2>&1 || { + echo "Expected '$needle' in $file" >&2 + cat "$file" >&2 || true + exit 1 + } +} + +assert_file_not_contains() { + file="$1" + needle="$2" + if grep -F -- "$needle" "$file" >/dev/null 2>&1; then + echo "Did not expect '$needle' in $file" >&2 + cat "$file" >&2 || true + exit 1 + fi +} + +make_package() { + agent="$1" + profile_dir="$2" + output="$3" + work="$tmp/pkg-$(basename "$output")" + rm -rf "$work" + mkdir -p "$work" + cat > "$work/manifest.json" < "$home/.hermes/settings.json" +run_tool "$home" export-profile hermes "$tmp/hermes-profile.tgz" backup > "$tmp/export.out" +[ -f "$tmp/hermes-profile.tgz" ] || { echo "export did not create package" >&2; exit 1; } +[ -f "$tmp/hermes-profile.tgz.manifest.json" ] || { echo "export did not create manifest" >&2; exit 1; } + +cat > "$fakebin/mv" < "$fallback_home/.hermes/settings.json" +run_tool "$fallback_home" export-profile hermes "$tmp/fallback-profile.tgz" backup > 
"$tmp/fallback-export.out" +[ -f "$tmp/fallback-profile.tgz" ] || { echo "export finalize fallback did not create package" >&2; exit 1; } +set -- "$tmp"/fallback-profile.tgz.tmp.* +[ ! -e "$1" ] || { echo "export finalize fallback left a tmp file" >&2; exit 1; } +rm -f "$fakebin/mv" + +cat > "$fakebin/tar" <&2 + echo "tar: .hermes: file changed as we read it" >&2 + exit 1 +fi +if [ "\$1" = "-czf" ] && [ "\$has_hermes" -eq 1 ] && [ "\${LIVE_TAR_MODE:-}" = "mixed-error" ]; then + "$real_tar" "\$@" + echo "tar: .hermes: file changed as we read it" >&2 + echo "tar: .hermes/missing: Cannot stat: No such file or directory" >&2 + exit 1 +fi +exec "$real_tar" "\$@" +EOF +chmod +x "$fakebin/tar" +churn_home="$tmp/churn-home" +mkdir -p "$churn_home/.hermes" +printf '{"churn":true}\n' > "$churn_home/.hermes/settings.json" +LIVE_TAR_MODE=churn run_tool "$churn_home" export-profile hermes "$tmp/churn-profile.tgz" backup > "$tmp/churn-export.out" 2> "$tmp/churn-export.err" +[ -f "$tmp/churn-profile.tgz" ] || { echo "live-churn export did not create package" >&2; exit 1; } +assert_file_contains "$tmp/churn-export.err" "File removed before we read it" +assert_file_contains "$tmp/churn-export.err" "file changed as we read it" +if LIVE_TAR_MODE=mixed-error run_tool "$churn_home" export-profile hermes "$tmp/mixed-error-profile.tgz" backup > "$tmp/mixed-error-export.out" 2>&1; then + echo "mixed tar error unexpectedly succeeded" >&2 + exit 1 +fi +assert_file_contains "$tmp/mixed-error-export.out" "Cannot stat" +assert_file_contains "$tmp/mixed-error-export.out" "Failed to export hermes profile" +rm -f "$fakebin/tar" + +fresh_home="$tmp/fresh-home" +mkdir -p "$fresh_home" +run_tool "$fresh_home" import-profile hermes "$tmp/hermes-profile.tgz" > "$tmp/import.out" +[ -f "$fresh_home/.hermes/settings.json" ] || { echo "import did not restore settings" >&2; exit 1; } + +if run_tool "$fresh_home" import-profile openclaw "$tmp/hermes-profile.tgz" > "$tmp/cross.out" 2>&1; then + echo 
"cross-agent import unexpectedly succeeded" >&2 + exit 1 +fi +assert_file_contains "$tmp/cross.out" "not openclaw" + +make_python_package hermes "$tmp/unsafe.tgz" unsafe +if run_tool "$fresh_home" import-profile hermes "$tmp/unsafe.tgz" > "$tmp/unsafe.out" 2>&1; then + echo "unsafe archive unexpectedly imported" >&2 + exit 1 +fi +assert_file_contains "$tmp/unsafe.out" "unsafe path" + +make_python_package hermes "$tmp/link.tgz" link +if run_tool "$fresh_home" import-profile hermes "$tmp/link.tgz" > "$tmp/link.out" 2>&1; then + echo "link archive unexpectedly imported" >&2 + exit 1 +fi +assert_file_contains "$tmp/link.out" "unsupported link" + +make_python_package hermes "$tmp/special.tgz" special +if run_tool "$fresh_home" import-profile hermes "$tmp/special.tgz" > "$tmp/special.out" 2>&1; then + echo "special archive unexpectedly imported" >&2 + exit 1 +fi +assert_file_contains "$tmp/special.out" "unsupported entry type" + +rollback_src="$tmp/rollback-src" +rollback_home="$tmp/rollback-home" +mkdir -p "$rollback_src/.hermes" "$rollback_home/.hermes" +printf 'new\n' > "$rollback_src/.hermes/settings.json" +printf 'old\n' > "$rollback_home/.hermes/settings.json" +make_package hermes "$rollback_src" "$tmp/rollback.tgz" +cat > "$fakebin/tar" < "$tmp/rollback.out" 2>&1; then + echo "forced extraction failure unexpectedly succeeded" >&2 + exit 1 +fi +assert_file_contains "$tmp/rollback.out" "Failed to import hermes profile" +assert_file_contains "$rollback_home/.hermes/settings.json" "old" +rm -f "$fakebin/tar" + +openclaw_home="$tmp/openclaw-home" +mkdir -p "$openclaw_home/.openclaw/feishu" "$openclaw_home/.openclaw/openclaw-weixin/node_modules/gtoken/node_modules/.bin" "$openclaw_home/.openclaw/browser/openclaw/user-data" "$openclaw_home/.openclaw/skills" "$tmp/linked-skill" +printf 'token\n' > "$openclaw_home/.openclaw/feishu/config" +printf 'skill\n' > "$tmp/linked-skill/SKILL.md" +ln -s ../uuid 
"$openclaw_home/.openclaw/openclaw-weixin/node_modules/gtoken/node_modules/.bin/uuid" +ln -s /tmp/chrome-lock "$openclaw_home/.openclaw/browser/openclaw/user-data/SingletonLock" +ln -s "$tmp/linked-skill" "$openclaw_home/.openclaw/skills/lark-minutes" +cat > "$openclaw_home/.openclaw/settings.json" <<'EOF' +{ + "mcpServers": { + "demo": { + "command": "demo" + } + }, + "permissions": { + "allow": ["demo"] + } +} +EOF +run_tool "$openclaw_home" export-openclaw-source "$tmp/openclaw-source.tgz" > "$tmp/export-openclaw.out" +"$real_tar" -tzf "$tmp/openclaw-source.tgz" | grep -F "node_modules" >/dev/null 2>&1 && { + echo "OpenClaw source export included node_modules" >&2 + exit 1 +} +"$real_tar" -tzf "$tmp/openclaw-source.tgz" | grep -F "SingletonLock" >/dev/null 2>&1 && { + echo "OpenClaw source export included browser runtime locks" >&2 + exit 1 +} +"$real_tar" -tzf "$tmp/openclaw-source.tgz" | grep -F ".openclaw/skills/lark-minutes/SKILL.md" >/dev/null 2>&1 || { + echo "OpenClaw source export did not dereference skill links" >&2 + exit 1 +} +cat > "$fakebin/tar" <&2; exit 9 ;; + esac + ;; +esac +exec "$real_tar" "\$@" +EOF +chmod +x "$fakebin/tar" +if run_tool "$openclaw_home" migrate-openclaw-dry-run "$tmp/openclaw-source.tgz" "$tmp/migration-report.md" merge skip > "$tmp/migrate.out" 2>&1; then + echo "migration dry run unexpectedly succeeded without hermes" >&2 + exit 1 +fi +assert_file_contains "$tmp/migrate.out" "missing the Hermes command" + +export HERMES_LOG="$tmp/hermes.log" +cat > "$fakebin/hermes" <<'EOF' +#!/bin/sh +printf '%s\n' "$*" >> "$HERMES_LOG" +[ "${1:-}" = "--overwrite" ] && { echo "global overwrite rejected" >&2; exit 64; } +if [ "${HERMES_REFUSE_APPLY:-}" = "1" ]; then + case " $* " in + *" --yes "*) echo "Refusing to apply"; exit 0 ;; + esac +fi +case "$*" in + *"claw migrate "*) echo "fake migration completed"; exit 0 ;; + config\ set*) exit 0 ;; +esac +exit 0 +EOF +chmod +x "$fakebin/hermes" + +TMPDIR=/dev/null run_tool "$openclaw_home" 
migrate-openclaw-dry-run "$tmp/openclaw-source.tgz" "$tmp/migration-report-ok.md" merge skip > "$tmp/migrate-ok.out" +assert_file_contains "$tmp/migration-report-ok.md" "fake migration completed" +assert_file_contains "$HERMES_LOG" "claw migrate" +assert_file_contains "$HERMES_LOG" "claw migrate --overwrite" +assert_file_contains "$HERMES_LOG" "--dry-run" +assert_file_contains "$HERMES_LOG" "--skill-conflict merge" +assert_file_contains "$HERMES_LOG" "--workspace-target skip" + +if HERMES_REFUSE_APPLY=1 TMPDIR=/dev/null run_tool "$openclaw_home" migrate-openclaw-apply "$tmp/openclaw-source.tgz" "$tmp/migration-refuse.md" overwrite preserve > "$tmp/migrate-refuse.out" 2>&1; then + echo "migration apply unexpectedly succeeded after refusal report" >&2 + exit 1 +fi +unset HERMES_REFUSE_APPLY +assert_file_contains "$tmp/migration-refuse.md" "Refusing to apply" +assert_file_contains "$tmp/migrate-refuse.out" "Hermes migration failed" + +TMPDIR=/dev/null run_tool "$openclaw_home" migrate-openclaw-apply "$tmp/openclaw-source.tgz" "$tmp/migration-apply.md" overwrite preserve > "$tmp/migrate-apply.out" +assert_file_contains "$tmp/migration-apply.md" "Migration completed." +assert_file_contains "$HERMES_LOG" "--yes" +assert_file_contains "$HERMES_LOG" "--skill-conflict overwrite" +assert_file_contains "$HERMES_LOG" "--workspace-target preserve" +[ -f "$openclaw_home/.hermes/feishu/config" ] || { echo "apply did not copy channel config" >&2; exit 1; } +assert_file_contains "$openclaw_home/.hermes/settings.json" "mcpServers" +rm -f "$fakebin/tar"