From f9bee2819d7280d2e27b55457ed7777d5cd289f9 Mon Sep 17 00:00:00 2001 From: Emma Zhou Date: Wed, 7 May 2025 21:28:28 -0700 Subject: [PATCH 01/25] WIP: deploy command --- MANIFEST.in | 2 +- synapse/cli/__main__.py | 3 +- synapse/cli/deploy.py | 767 ++++++++++++++++++ synapse/templates/app/build_docker.sh | 60 ++ synapse/templates/app/deploy/deploy.py | 253 ++++++ synapse/templates/app/deploy/requirements.txt | 2 + synapse/templates/app/ops/package/package.sh | 70 ++ .../ops/package/scripts/launch_synapse_app.sh | 44 + .../app/ops/package/scripts/postinstall.sh | 17 + .../app/ops/package/scripts/postremove.sh | 5 + .../app/ops/package/scripts/preremove.sh | 9 + .../ops/package/systemd/{{APP_NAME}}.service | 27 + synapse/templates/app/package.sh | 37 + synapse/templates/app/setup_app.py | 261 ++++++ synapse/templates/app/start_docker.sh | 40 + 15 files changed, 1595 insertions(+), 2 deletions(-) create mode 100644 synapse/cli/deploy.py create mode 100755 synapse/templates/app/build_docker.sh create mode 100644 synapse/templates/app/deploy/deploy.py create mode 100644 synapse/templates/app/deploy/requirements.txt create mode 100755 synapse/templates/app/ops/package/package.sh create mode 100755 synapse/templates/app/ops/package/scripts/launch_synapse_app.sh create mode 100755 synapse/templates/app/ops/package/scripts/postinstall.sh create mode 100755 synapse/templates/app/ops/package/scripts/postremove.sh create mode 100755 synapse/templates/app/ops/package/scripts/preremove.sh create mode 100644 synapse/templates/app/ops/package/systemd/{{APP_NAME}}.service create mode 100644 synapse/templates/app/package.sh create mode 100644 synapse/templates/app/setup_app.py create mode 100644 synapse/templates/app/start_docker.sh diff --git a/MANIFEST.in b/MANIFEST.in index 4c3fecf0..e375b7fd 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1 +1 @@ -recursive-include synapse *.py *.pyx *.pyd *.c *.h +recursive-include synapse *.py *.pyx *.pyd *.c *.h *.sh *.service diff --git 
a/synapse/cli/__main__.py b/synapse/cli/__main__.py index ad9012af..2fa050cd 100755 --- a/synapse/cli/__main__.py +++ b/synapse/cli/__main__.py @@ -5,7 +5,7 @@ import sys from importlib import metadata -from synapse.cli import discover, rpc, streaming, offline_plot, files +from synapse.cli import discover, rpc, streaming, offline_plot, files, deploy from rich.logging import RichHandler from rich.console import Console from synapse.utils.discover import find_device_by_name @@ -64,6 +64,7 @@ def main(): streaming.add_commands(subparsers) offline_plot.add_commands(subparsers) files.add_commands(subparsers) + deploy.add_commands(subparsers) args = parser.parse_args() # If we need to setup the device URI, do that now diff --git a/synapse/cli/deploy.py b/synapse/cli/deploy.py new file mode 100644 index 00000000..09e1c0ad --- /dev/null +++ b/synapse/cli/deploy.py @@ -0,0 +1,767 @@ +import argparse +import os +import sys +import subprocess +import shutil +import json +import tempfile +import time +import logging +from pathlib import Path +from rich.console import Console +from rich.panel import Panel +from rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn, TimeElapsedColumn +from rich.prompt import Prompt +from rich import box + +# Set up console for normal output and a separate one for logs +console = Console() +log_console = Console(stderr=True) + +# Configure logging for paramiko to be less verbose +logging.getLogger("paramiko").setLevel(logging.WARNING) + +def validate_manifest(manifest_path): + """Validate the manifest file exists and has required properties""" + try: + with open(manifest_path, 'r') as f: + manifest = json.load(f) + + # Basic validation + if 'name' not in manifest: + console.print(f"[bold red]Error:[/bold red] manifest.json is missing required 'name' property") + return False + + return manifest + except FileNotFoundError: + console.print(f"[bold red]Error:[/bold red] manifest.json not found in {manifest_path}") + return False + 
except json.JSONDecodeError: + console.print(f"[bold red]Error:[/bold red] manifest.json is not valid JSON") + return False + +def replace_app_name_in_scripts(app_dir, app_name): + """Replace the app name in packaging scripts""" + package_script = os.path.join(app_dir, 'ops', 'package', 'package.sh') + + # List of scripts that need app name replacement + script_paths = [ + os.path.join(app_dir, 'ops', 'package', 'package.sh'), + os.path.join(app_dir, 'ops', 'package', 'scripts', 'postinstall.sh'), + os.path.join(app_dir, 'ops', 'package', 'scripts', 'preremove.sh'), + os.path.join(app_dir, 'ops', 'package', 'scripts', 'launch_synapse_app.sh') + ] + + # Replace template variables in each script + for script_path in script_paths: + if os.path.exists(script_path): + with open(script_path, 'r') as f: + content = f.read() + + # Replace the app name in the script + content = content.replace('{{APP_NAME}}', app_name) + content = content.replace('SYNAPSE_APP_EXE="synapse-example-app"', f'SYNAPSE_APP_EXE="{app_name}"') + + with open(script_path, 'w') as f: + f.write(content) + + # Handle the systemd service file specially - it needs to be renamed + systemd_template = os.path.join(app_dir, 'ops', 'package', 'systemd', '{{APP_NAME}}.service') + systemd_target = os.path.join(app_dir, 'ops', 'package', 'systemd', f'{app_name}.service') + + if os.path.exists(systemd_template): + # Read template content + with open(systemd_template, 'r') as f: + content = f.read() + + # Replace template variables + content = content.replace('{{APP_NAME}}', app_name) + + # Write to the new file + with open(systemd_target, 'w') as f: + f.write(content) + + # Remove the template file if it's different from the target + if systemd_template != systemd_target: + try: + os.remove(systemd_template) + except: + pass + +def package_app(app_dir): + """Package the application into a .deb file""" + # Check if we're in a Docker container + if os.path.exists('/.dockerenv'): + # We're inside Docker, directly run 
the package script + package_script = os.path.join(app_dir, 'ops', 'package', 'package.sh') + if not os.path.exists(package_script): + console.print(f"[bold red]Error:[/bold red] Package script not found at {package_script}") + return False + + # Make sure the script is executable + os.chmod(package_script, 0o755) + + # Make sure all the other scripts are executable too + script_dir = os.path.join(app_dir, 'ops', 'package', 'scripts') + if os.path.exists(script_dir): + for script in os.listdir(script_dir): + if script.endswith('.sh'): + script_path = os.path.join(script_dir, script) + os.chmod(script_path, 0o755) + + with Progress( + SpinnerColumn(), + TextColumn("[bold blue]{task.description}[/bold blue]"), + BarColumn(), + TimeElapsedColumn(), + console=console + ) as progress: + task = progress.add_task("[yellow]Packaging application...", total=1) + + # Run the package script + try: + subprocess.run(['bash', package_script], check=True, cwd=app_dir) + progress.update(task, advance=1) + return True + except subprocess.CalledProcessError as e: + console.print(f"[bold red]Error:[/bold red] Failed to package application: {e}") + return False + else: + # We're outside Docker, need to use docker to package + script_path = os.path.join(app_dir, 'build_docker.sh') + + # Check if build_docker.sh exists in app_dir + if not os.path.exists(script_path): + # Use the one from synapse-python instead + script_dir = os.path.dirname(os.path.abspath(__file__)) + template_dir = os.path.join(script_dir, '..', 'templates', 'app') + script_path = os.path.join(template_dir, 'build_docker.sh') + + if not os.path.exists(script_path): + console.print(f"[bold red]Error:[/bold red] Docker build script not found") + return False + + # Copy the script to the app directory + shutil.copy(script_path, os.path.join(app_dir, 'build_docker.sh')) + script_path = os.path.join(app_dir, 'build_docker.sh') + + # Also check if the ops directory exists, if not, copy template files + ops_dir = 
os.path.join(app_dir, 'ops') + if not os.path.exists(ops_dir) or not os.path.exists(os.path.join(ops_dir, 'package', 'package.sh')): + # Create the ops directory structure + os.makedirs(os.path.join(app_dir, 'ops', 'package', 'scripts'), exist_ok=True) + os.makedirs(os.path.join(app_dir, 'ops', 'package', 'systemd'), exist_ok=True) + + # Copy template files from synapse-python + template_ops_dir = os.path.join(template_dir, 'ops') + if os.path.exists(template_ops_dir): + # Copy package.sh + package_sh = os.path.join(template_ops_dir, 'package', 'package.sh') + if os.path.exists(package_sh): + shutil.copy(package_sh, os.path.join(app_dir, 'ops', 'package', 'package.sh')) + + # Copy scripts + scripts_dir = os.path.join(template_ops_dir, 'package', 'scripts') + if os.path.exists(scripts_dir): + for script in os.listdir(scripts_dir): + src = os.path.join(scripts_dir, script) + dst = os.path.join(app_dir, 'ops', 'package', 'scripts', script) + shutil.copy(src, dst) + + # Copy systemd files + systemd_dir = os.path.join(template_ops_dir, 'package', 'systemd') + if os.path.exists(systemd_dir): + for file in os.listdir(systemd_dir): + src = os.path.join(systemd_dir, file) + dst = os.path.join(app_dir, 'ops', 'package', 'systemd', file) + shutil.copy(src, dst) + + # Make sure the script is executable + os.chmod(script_path, 0o755) + + with Progress( + SpinnerColumn(), + TextColumn("[bold blue]{task.description}[/bold blue]"), + BarColumn(), + TimeElapsedColumn(), + console=console + ) as progress: + build_task = progress.add_task("[yellow]Building Docker image...", total=1) + + # Build the Docker image - capture output to prevent interference with progress + try: + # Use capture_output to prevent Docker output from interfering with progress bars + result = subprocess.run(['bash', script_path], check=True, cwd=app_dir, + capture_output=True, text=True) + + # Only log errors if they occur, otherwise just update progress + if result.stderr and ("error" in result.stderr.lower() 
or "fail" in result.stderr.lower()): + console.print(f"[bold red]Warning:[/bold red] {result.stderr}") + + progress.update(build_task, advance=1) + except subprocess.CalledProcessError as e: + console.print(f"[bold red]Error:[/bold red] Failed to build Docker image: {e}") + if e.stderr: + console.print(f"[red]{e.stderr}[/red]") + return False + + package_task = progress.add_task("[yellow]Packaging application...", total=1) + + # Now run package.sh in the Docker container + try: + # Detect architecture + arch = subprocess.check_output(['uname', '-m']).decode('utf-8').strip() + if arch in ['arm64', 'aarch64']: + tag_suffix = 'arm64' + else: + tag_suffix = 'amd64' + + # Image name + image = f"{os.path.basename(app_dir)}:latest-{tag_suffix}" + + # Run the packaging script in Docker - capture output + cmd = [ + 'docker', 'run', '-it', '--rm', + '-v', f"{os.path.abspath(app_dir)}:/home/workspace", + image, + '/bin/bash', '-c', "cd /home/workspace && ./ops/package/package.sh" + ] + + # Capture output to prevent it from interfering with progress bars + result = subprocess.run(cmd, check=True, cwd=app_dir, + capture_output=True, text=True) + + # Only log errors if they occur + if result.stderr and ("error" in result.stderr.lower() or "fail" in result.stderr.lower()): + console.print(f"[bold red]Warning:[/bold red] {result.stderr}") + + progress.update(package_task, advance=1) + + # Display success message after completion + console.print("[green]Package created successfully![/green]") + return True + except subprocess.CalledProcessError as e: + console.print(f"[bold red]Error:[/bold red] Failed to package application: {e}") + if e.stderr: + console.print(f"[red]{e.stderr}[/red]") + return False + +def find_deb_package(app_dir): + """Find the generated .deb package in the app directory""" + for file in os.listdir(app_dir): + if file.endswith('.deb'): + return os.path.join(app_dir, file) + + console.print(f"[bold red]Error:[/bold red] Could not find .deb package in 
{app_dir}") + return None + +def get_device_credentials(ip_address): + """Get user credentials with clear prompts""" + console.print() + console.print(Panel( + f"[bold yellow]Device Connection Details[/bold yellow]\n[white]Target device:[/white] [green]{ip_address}[/green]", + border_style="blue" + )) + + username = Prompt.ask("Enter login username", default="scifi") + + import getpass + console.print("[bold blue]Enter login password (input will be hidden):[/bold blue]", end=" ") + login_password = getpass.getpass("") + + console.print("[bold blue]Enter root password for package installation (input will be hidden):[/bold blue]", end=" ") + root_password = getpass.getpass("") + + console.print() + return username, login_password, root_password + +def deploy_package(ip_address, deb_package_path): + """Deploy the package to the device""" + package_filename = os.path.basename(deb_package_path) + + # Stop any previous progress display + console.clear_live() + + # Get cached credentials or prompt for new ones + cached_ip, username, login_password, root_password = load_cached_credentials() + + # If no cached credentials or they don't match our target IP, prompt for new ones + if not cached_ip or cached_ip != ip_address or not username or not login_password or not root_password: + username, login_password, root_password = get_device_credentials(ip_address) + + with Progress( + SpinnerColumn(), + TextColumn("[bold blue]{task.description}[/bold blue]"), + BarColumn(), + TimeElapsedColumn(), + console=console + ) as progress: + deploy_task = progress.add_task(f"[yellow]Deploying to {ip_address}...", total=3) + + try: + # Deploy directly using paramiko + client = None + sftp = None + shell = None + + # Create SSH client + import paramiko + client = paramiko.SSHClient() + client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + + # Connect to the device (connection task) + connect_task = progress.add_task("[green]Connecting to device...", total=1) + + try: + 
client.connect(ip_address, username=username, password=login_password, timeout=10) + progress.update(connect_task, completed=1) + progress.update(deploy_task, advance=1) + except Exception as e: + progress.update(connect_task, visible=False) + console.print(f"[bold red]Error connecting to {ip_address}:[/bold red] {str(e)}") + console.print("[yellow]Please check your username and password.[/yellow]") + return False + + # Upload file task + upload_task = progress.add_task("[cyan]Uploading package...", total=1) + + try: + # Create SFTP client and upload + sftp = client.open_sftp() + remote_path = f"/tmp/{package_filename}" + sftp.put(deb_package_path, remote_path) + progress.update(upload_task, completed=1) + progress.update(deploy_task, advance=1) + except Exception as e: + progress.update(upload_task, visible=False) + console.print(f"[bold red]Error uploading package:[/bold red] {str(e)}") + return False + + # Install task + install_task = progress.add_task("[magenta]Installing package...", total=1) + + try: + # Use expect-like behavior with Paramiko to handle su + shell = client.invoke_shell() + + # Set up a way to collect output + output = "" + + # Send su command + shell.send("su -\n") + time.sleep(1) # Wait for password prompt + + # Send root password + shell.send(f"{root_password}\n") + time.sleep(1) # Wait for su to authenticate + + # Send dpkg command + shell.send(f"dpkg -i {remote_path}\n") + time.sleep(5) # Give dpkg time to run + + # Exit from root shell + shell.send("exit\n") + time.sleep(0.5) + + # Collect the final output + while shell.recv_ready(): + chunk = shell.recv(4096).decode('utf-8') + output += chunk + + # Check for common error indicators + if "error" in output.lower() or "failed" in output.lower(): + progress.update(install_task, visible=False) + progress.update(deploy_task, visible=False) + console.print(Panel( + f"[bold red]Installation Error[/bold red]\n\n{output}", + title="Deployment Failed", + border_style="red", + box=box.DOUBLE + )) + 
return False + + # Cleanup + shell.send(f"rm {remote_path}\n") + time.sleep(0.5) + + # Mark installation as complete + progress.update(install_task, completed=1) + progress.update(deploy_task, advance=1) + + # Save successful credentials + save_credentials(ip_address, username, login_password, root_password) + + console.print(Panel( + f"[bold green]Successfully deployed[/bold green] [yellow]{package_filename}[/yellow] [bold green]to[/bold green] [blue]{ip_address}[/blue]", + title="Deployment Successful", + border_style="green", + box=box.DOUBLE + )) + return True + + except Exception as e: + progress.update(install_task, visible=False) + progress.update(deploy_task, visible=False) + console.print(f"[bold red]Error during installation:[/bold red] {str(e)}") + return False + + except Exception as e: + progress.update(deploy_task, visible=False) + console.print(f"[bold red]Error:[/bold red] Failed to deploy package: {e}") + return False + finally: + # Clean up connections + try: + if shell: + shell.close() + if sftp: + sftp.close() + if client: + client.close() + except: + pass + +def load_cached_credentials(): + """Load cached credentials from the config file""" + cache_file = ".synapse_deploy_cache.json" + try: + if os.path.exists(cache_file): + with open(cache_file, 'r') as f: + data = json.load(f) + ip_address = data.get('ip_address') + username = data.get('username', 'scifi') + encoded_login_password = data.get('encoded_login_password') + encoded_root_password = data.get('encoded_root_password') + + if encoded_login_password and encoded_root_password: + import base64 + login_password = base64.b64decode(encoded_login_password).decode('utf-8') + root_password = base64.b64decode(encoded_root_password).decode('utf-8') + console.print(f"[green]Using cached credentials for [bold]{username}@{ip_address}[/bold][/green]") + return ip_address, username, login_password, root_password + except Exception as e: + console.print(f"[yellow]Warning: Failed to load cached 
credentials: {e}[/yellow]") + return None, None, None, None + +def save_credentials(ip_address, username, login_password, root_password): + """Save credentials to cache file""" + cache_file = ".synapse_deploy_cache.json" + try: + import base64 + with open(cache_file, 'w') as f: + data = { + 'ip_address': ip_address, + 'username': username, + 'encoded_login_password': base64.b64encode(login_password.encode('utf-8')).decode('utf-8'), + 'encoded_root_password': base64.b64encode(root_password.encode('utf-8')).decode('utf-8') + } + json.dump(data, f) + os.chmod(cache_file, 0o600) # Restrict file permissions + except Exception as e: + console.print(f"[yellow]Warning: Failed to save credentials: {e}[/yellow]") + +def start_app(ip_address, app_name): + """Start the application on the device""" + # Stop any previous progress display + console.clear_live() + + # Get cached credentials or prompt for new ones + cached_ip, username, login_password, root_password = load_cached_credentials() + + # If no cached credentials or they don't match our target IP, prompt for new ones + if not cached_ip or cached_ip != ip_address or not username or not login_password or not root_password: + username, login_password, root_password = get_device_credentials(ip_address) + + with Progress( + SpinnerColumn(), + TextColumn("[bold blue]{task.description}[/bold blue]"), + console=console + ) as progress: + task = progress.add_task(f"[yellow]Starting {app_name} on {ip_address}...", total=1) + + try: + # Start the app using paramiko + import paramiko + client = None + shell = None + + try: + # Create SSH client + client = paramiko.SSHClient() + client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + + # Connect to the device + client.connect(ip_address, username=username, password=login_password, timeout=10) + + # Use interactive shell to run commands with sudo + shell = client.invoke_shell() + + # Send the command to start the application + shell.send(f"sudo systemctl start 
{app_name}.service\n") + time.sleep(2) # Wait for the command to execute + + # Collect output to check for errors + output = "" + while shell.recv_ready(): + chunk = shell.recv(4096).decode('utf-8') + output += chunk + + # If there's an error, we'll usually see it in the output + if "error" in output.lower() or "failed" in output.lower(): + progress.update(task, visible=False) + console.print(f"[bold red]Error:[/bold red] Failed to start application:\n{output}") + return False + + # Save successful credentials + save_credentials(ip_address, username, login_password, root_password) + + progress.update(task, advance=1) + + console.print(Panel( + f"[bold green]Successfully started[/bold green] [yellow]{app_name}[/yellow] [bold green]on[/bold green] [blue]{ip_address}[/blue]", + title="Application Started", + border_style="green", + box=box.DOUBLE + )) + return True + + except Exception as e: + progress.update(task, visible=False) + console.print(f"[bold red]Error:[/bold red] {str(e)}") + return False + + except Exception as e: + progress.update(task, visible=False) + console.print(f"[bold red]Error:[/bold red] Failed to start application: {e}") + return False + finally: + # Clean up connections + try: + if shell: + shell.close() + if client: + client.close() + except: + pass + +def build_app(app_dir, app_name): + """Build the application binary before packaging""" + console.print(f"[yellow]Building application: {app_name}...[/yellow]") + + # Check if binary already exists + binary_paths = [ + os.path.join(app_dir, 'build-aarch64', app_name), + os.path.join(app_dir, 'build', app_name), + os.path.join(app_dir, 'build-arm64', app_name), + os.path.join(app_dir, 'out', app_name) + ] + + for path in binary_paths: + if os.path.exists(path): + console.print(f"[green]Binary already exists at: {path}[/green]") + return True + + # Binary doesn't exist, build it + console.print("[yellow]Binary not found, attempting to build...[/yellow]") + + # Detect architecture + arch = 
subprocess.check_output(['uname', '-m']).decode('utf-8').strip() + if arch in ['arm64', 'aarch64']: + tag_suffix = 'arm64' + else: + tag_suffix = 'amd64' + + # Image name + image = f"{os.path.basename(app_dir)}:latest-{tag_suffix}" + + # Check if Docker image exists + try: + subprocess.run(['docker', 'image', 'inspect', image], check=True, + stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + console.print(f"[green]Docker image {image} already exists.[/green]") + except subprocess.CalledProcessError: + # Docker image doesn't exist, build it + console.print(f"[yellow]Docker image {image} not found, building it first...[/yellow]") + build_docker_script = os.path.join(app_dir, 'build_docker.sh') + try: + # Run the build script without capturing output so user can see progress + console.print(f"[blue]Running build_docker.sh...[/blue]") + subprocess.run(['bash', build_docker_script], check=True, cwd=app_dir) + console.print(f"[green]Successfully built Docker image.[/green]") + except subprocess.CalledProcessError as e: + console.print(f"[bold red]Error:[/bold red] Failed to build Docker image: {e}") + return False + + # Now build the app in Docker + console.print("[yellow]Building application in Docker container...[/yellow]") + console.print("[dim]This may take a few minutes. You'll see output during the build process.[/dim]") + + # First, try to run vcpkg to install dependencies + vcpkg_cmd = [ + 'docker', 'run', '--rm', + '-v', f"{os.path.abspath(app_dir)}:/home/workspace", + image, + '/bin/bash', '-c', "cd /home/workspace && if [ -f vcpkg.json ]; then echo 'Installing dependencies from vcpkg.json...'; ${VCPKG_ROOT}/vcpkg install --triplet arm64-linux-dynamic-release; fi" + ] + + try: + console.print("[blue]Installing dependencies...[/blue]") + subprocess.run(vcpkg_cmd, check=True, cwd=app_dir) + except subprocess.CalledProcessError as e: + console.print(f"[yellow]Warning: Failed to install dependencies. 
The build might still succeed.[/yellow]") + + # Now run the actual build command with a proper CMake preset + build_cmd = [ + 'docker', 'run', '--rm', + '-v', f"{os.path.abspath(app_dir)}:/home/workspace", + image, + '/bin/bash', '-c', "cd /home/workspace && \ + if [ -f CMakePresets.json ]; then \ + # Use the existing presets if available\ + echo 'Using existing CMake presets...' && \ + cmake --preset=dynamic-aarch64 && \ + cmake --build --preset=cross-release -j$(nproc); \ + else \ + # Fall back to manual configuration\ + echo 'No CMake presets found, using manual configuration...' && \ + export VCPKG_DEFAULT_TRIPLET=arm64-linux-dynamic-release && \ + cmake -B build -S . \ + -DCMAKE_TOOLCHAIN_FILE=${VCPKG_ROOT}/scripts/buildsystems/vcpkg.cmake \ + -DVCPKG_TARGET_TRIPLET=arm64-linux-dynamic-release \ + -DVCPKG_INSTALLED_DIR=${VCPKG_ROOT}/vcpkg_installed \ + -DBUILD_SHARED_LIBS=ON \ + -DCMAKE_BUILD_TYPE=Release \ + -DBUILD_FOR_ARM64=ON && \ + cmake --build build -j$(nproc); \ + fi" + ] + + try: + # Run without capturing output so the user can see progress + console.print("[blue]Running build command...[/blue]") + subprocess.run(build_cmd, check=True, cwd=app_dir) + + # Check if build succeeded + for path in binary_paths: + if os.path.exists(path): + console.print(f"[green]Successfully built binary at: {path}[/green]") + return True + + # If we get here, the build might have succeeded but we can't find the binary + console.print("[bold yellow]Warning: Build completed but binary not found in expected locations.[/bold yellow]") + # Try to find it manually + binary_path = subprocess.run( + ['find', app_dir, '-type', 'f', '-name', app_name, '-not', '-path', '*/.*'], + capture_output=True, text=True, check=False + ).stdout.strip() + + if binary_path: + binary_path = binary_path.split('\n')[0] # Take the first match if multiple + console.print(f"[green]Found binary at: {binary_path}[/green]") + + # Try to copy it to one of the standard locations + build_dir = 
os.path.join(app_dir, 'build') + os.makedirs(build_dir, exist_ok=True) + shutil.copy(binary_path, os.path.join(build_dir, app_name)) + console.print(f"[green]Copied binary to: {os.path.join(build_dir, app_name)}[/green]") + return True + + return False + except subprocess.CalledProcessError as e: + console.print(f"[bold red]Error:[/bold red] Failed to build application. Check the CMake output above for details.") + return False + +def deploy_cmd(args): + """Handle the deploy command""" + # Check for required modules and install them if missing + try: + import paramiko + except ImportError: + console.print("[yellow]Required module 'paramiko' not found. Attempting to install...[/yellow]") + try: + subprocess.check_call([sys.executable, "-m", "pip", "install", "paramiko>=2.7.2"]) + console.print("[green]Successfully installed paramiko.[/green]") + # Re-import after installing + import paramiko + except Exception as e: + console.print(f"[bold red]Error:[/bold red] Failed to install paramiko: {e}") + console.print("[yellow]Please manually install required dependencies:[/yellow]") + console.print("pip install paramiko>=2.7.2") + return + + # Get absolute path of app directory + app_dir = os.path.abspath(args.app_dir) + + # Validate manifest.json + manifest_path = os.path.join(app_dir, 'manifest.json') + manifest = validate_manifest(manifest_path) + if not manifest: + return + + # Get app name from manifest + app_name = manifest['name'] + console.print(f"[bold]Deploying application:[/bold] [yellow]{app_name}[/yellow]") + + # First, build the app + if not build_app(app_dir, app_name): + console.print("[bold red]Error:[/bold red] Failed to build the application.") + return + + # Replace app name in packaging scripts + replace_app_name_in_scripts(app_dir, app_name) + + # Package the app + if not package_app(app_dir): + return + + # Find the generated .deb package + deb_package = find_deb_package(app_dir) + if not deb_package: + return + + # Deploy the package to the device + 
uri = getattr(args, 'uri', None) + if uri: + deploy_package(uri, deb_package) + else: + console.print("[yellow]No URI provided. Package created but not deployed.[/yellow]") + console.print(f"[green]Package available at:[/green] {deb_package}") + +def start_app_cmd(args): + """Handle the start-app command""" + # Check for required modules and install them if missing + try: + import paramiko + except ImportError: + console.print("[yellow]Required module 'paramiko' not found. Attempting to install...[/yellow]") + try: + subprocess.check_call([sys.executable, "-m", "pip", "install", "paramiko>=2.7.2"]) + console.print("[green]Successfully installed paramiko.[/green]") + # Re-import after installing + import paramiko + except Exception as e: + console.print(f"[bold red]Error:[/bold red] Failed to install paramiko: {e}") + console.print("[yellow]Please manually install required dependencies:[/yellow]") + console.print("pip install paramiko>=2.7.2") + return + + uri = getattr(args, 'uri', None) + if not uri: + console.print("[bold red]Error:[/bold red] No URI provided. 
Cannot start the application.") + return + + start_app(uri, args.app_name) + +def add_commands(subparsers): + """Add deploy commands to the CLI""" + # Deploy command + deploy_parser = subparsers.add_parser('deploy', help='Deploy an application to a Synapse device') + deploy_parser.add_argument('app_dir', nargs='?', default='.', help='Path to the application directory') + deploy_parser.add_argument('--uri', '-u', help='Device IP address to deploy to', type=str) + deploy_parser.set_defaults(func=deploy_cmd) + + # Start app command + start_app_parser = subparsers.add_parser('start-app', help='Start an application on a Synapse device') + start_app_parser.add_argument('app_name', help='Name of the application to start') + start_app_parser.add_argument('--uri', '-u', help='Device IP address to start the app on', type=str) + start_app_parser.set_defaults(func=start_app_cmd) \ No newline at end of file diff --git a/synapse/templates/app/build_docker.sh b/synapse/templates/app/build_docker.sh new file mode 100755 index 00000000..55b18b45 --- /dev/null +++ b/synapse/templates/app/build_docker.sh @@ -0,0 +1,60 @@ +#!/bin/bash +set -e + +# App name from manifest.json or directory name +APP_NAME=$(basename "$(pwd)") +if [ -f "manifest.json" ]; then + # Try to extract the name from manifest.json + if command -v jq &> /dev/null; then + MANIFEST_NAME=$(jq -r '.name' manifest.json 2>/dev/null) + if [ -n "$MANIFEST_NAME" ] && [ "$MANIFEST_NAME" != "null" ]; then + APP_NAME=$MANIFEST_NAME + fi + fi +fi + +# Detect architecture +ARCH=$(uname -m) +if [[ "${ARCH}" == "arm64" || "${ARCH}" == "aarch64" ]]; then + CONTAINER_TAG="arm64" + PLATFORM="linux/arm64" + DOCKERFILE_PATH="ops/docker/Dockerfile.arm64" +else + CONTAINER_TAG="amd64" + PLATFORM="linux/amd64" + DOCKERFILE_PATH="ops/docker/Dockerfile" +fi + +# Image names +SDK_IMAGE="${APP_NAME}:latest-${CONTAINER_TAG}" + +echo "Building for architecture: $ARCH" +echo "Application name: $APP_NAME" + +# Build the SDK image +docker build -t 
$SDK_IMAGE -f "${DOCKERFILE_PATH}" . + +echo "Successfully built $SDK_IMAGE" + +# Check if the binary exists, and if not, try to build it +if [ ! -f "build/${APP_NAME}" ] && [ ! -f "build-aarch64/${APP_NAME}" ] && [ ! -f "build-arm64/${APP_NAME}" ]; then + echo "Binary not found, attempting to build it in Docker..." + # Try to build the binary in Docker + docker run --rm \ + -v "$(pwd):/home/workspace" \ + $SDK_IMAGE \ + /bin/bash -c "cd /home/workspace && \ + mkdir -p build && \ + cd build && \ + cmake .. && \ + make -j$(nproc)" + + # Check if build succeeded + if [ -f "build/${APP_NAME}" ]; then + echo "Binary built successfully at build/${APP_NAME}" + else + echo "Warning: Could not automatically build binary. Make sure it's available before packaging." + fi +else + echo "Binary already exists, skipping build step." +fi \ No newline at end of file diff --git a/synapse/templates/app/deploy/deploy.py b/synapse/templates/app/deploy/deploy.py new file mode 100644 index 00000000..92fef6b0 --- /dev/null +++ b/synapse/templates/app/deploy/deploy.py @@ -0,0 +1,253 @@ +#!/usr/bin/env python3 + +import os +import sys +import getpass +import paramiko +import json +import base64 +import time +from pathlib import Path +from rich.console import Console +from rich.panel import Panel +from rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn, TimeElapsedColumn +from rich.prompt import Prompt, Confirm +from rich.text import Text +from rich import box + +# Initialize Rich console +console = Console() + +# Configuration +CACHE_FILE = ".synapse_deploy_cache.json" + +def get_credentials(): + """Prompt for device credentials with rich formatting""" + console.print("[bold yellow]Device Connection Details[/bold yellow]") + ip_address = Prompt.ask("Enter SciFi device IP address") + username = Prompt.ask("Enter login username", default="scifi") + login_password = getpass.getpass("Enter login password: ") + root_password = getpass.getpass("Enter root password for package 
installation: ") + return ip_address, username, login_password, root_password + +def load_cached_credentials(): + """Load cached credentials if they exist""" + try: + if os.path.exists(CACHE_FILE): + with console.status("[bold blue]Loading cached credentials...[/bold blue]"): + with open(CACHE_FILE, 'r') as f: + data = json.load(f) + ip_address = data.get('ip_address') + username = data.get('username', 'admin') + encoded_login_password = data.get('encoded_login_password') + encoded_root_password = data.get('encoded_root_password') + + if encoded_login_password and encoded_root_password: + login_password = base64.b64decode(encoded_login_password).decode('utf-8') + root_password = base64.b64decode(encoded_root_password).decode('utf-8') + console.print(f"[green]Using cached credentials for [bold]{username}@{ip_address}[/bold][/green]") + return ip_address, username, login_password, root_password + except Exception as e: + console.print(f"[yellow]Warning: Failed to load cached credentials: {e}[/yellow]") + return None, None, None, None + +def save_credentials(ip_address, username, login_password, root_password): + """Save credentials to cache file""" + try: + with console.status("[bold blue]Saving credentials...[/bold blue]"): + with open(CACHE_FILE, 'w') as f: + data = { + 'ip_address': ip_address, + 'username': username, + 'encoded_login_password': base64.b64encode(login_password.encode('utf-8')).decode('utf-8'), + 'encoded_root_password': base64.b64encode(root_password.encode('utf-8')).decode('utf-8') + } + json.dump(data, f) + os.chmod(CACHE_FILE, 0o600) # Restrict file permissions + console.print("[green]Credentials saved successfully[/green]") + except Exception as e: + console.print(f"[yellow]Warning: Failed to save credentials: {e}[/yellow]") + +def deploy_package(ip_address, username, login_password, root_password, deb_package): + """Deploy and install the deb package to the SciFi device""" + package_filename = os.path.basename(deb_package) + + with Progress( 
+ SpinnerColumn(), + TextColumn("[bold blue]{task.description}[/bold blue]"), + BarColumn(), + TimeElapsedColumn(), + console=console + ) as progress: + # Setup overall task + overall_task = progress.add_task("[yellow]Overall deployment progress...", total=4) + + # Connect to device + connect_task = progress.add_task(f"[green]Connecting as {username}@{ip_address}...", total=1) + try: + # Create SSH client + client = paramiko.SSHClient() + client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + + # Connect to the device + client.connect(ip_address, username=username, password=login_password, timeout=10) + progress.update(connect_task, advance=1) + progress.update(overall_task, advance=1) + + # Create SFTP task + transfer_task = progress.add_task(f"[cyan]Transferring {package_filename}...", total=1) + + # Create SFTP client + sftp = client.open_sftp() + remote_path = f"/tmp/{package_filename}" + + # Upload the package + sftp.put(deb_package, remote_path) + progress.update(transfer_task, advance=1) + progress.update(overall_task, advance=1) + + # Install task + install_task = progress.add_task("[magenta]Installing package with root privileges...", total=1) + + # Use expect-like behavior with Paramiko to handle su + # First, we create an interactive shell session + shell = client.invoke_shell() + + # Set up a way to collect output + output = "" + + # Send su command + shell.send("su -\n") + time.sleep(1) # Wait for password prompt + + # Send root password + shell.send(f"{root_password}\n") + time.sleep(1) # Wait for su to authenticate + + # Send dpkg command + shell.send(f"dpkg -i {remote_path}\n") + time.sleep(3) # Give dpkg time to run + + # Exit from root shell + shell.send("exit\n") + time.sleep(0.5) + + # Collect the final output + while shell.recv_ready(): + chunk = shell.recv(4096).decode('utf-8') + output += chunk + + # Check for common error indicators + if "error" in output.lower() or "failed" in output.lower(): + progress.update(install_task, 
completed=1, visible=False) + progress.update(overall_task, visible=False) + console.print(Panel( + f"[bold red]Installation Error[/bold red]\n\n{output}", + title="Deployment Failed", + border_style="red", + box=box.DOUBLE + )) + return False + + # Complete the tasks + progress.update(install_task, advance=1) + progress.update(overall_task, advance=1) + + # Cleanup task + cleanup_task = progress.add_task("[blue]Cleaning up...", total=1) + shell.send(f"rm {remote_path}\n") + time.sleep(0.5) + progress.update(cleanup_task, advance=1) + progress.update(overall_task, advance=1) + + console.print(Panel( + f"[bold green]Successfully deployed[/bold green] [yellow]{package_filename}[/yellow] [bold green]to[/bold green] [blue]{ip_address}[/blue]", + title="Deployment Successful", + border_style="green", + box=box.DOUBLE + )) + return True + + except Exception as e: + progress.update(overall_task, visible=False) + console.print(Panel( + f"[bold red]Connection Error[/bold red]\n\n{str(e)}", + title="Deployment Failed", + border_style="red", + box=box.DOUBLE + )) + return False + finally: + try: + if 'shell' in locals(): + shell.close() + if 'sftp' in locals(): + sftp.close() + if 'client' in locals(): + client.close() + except: + pass + +def main(): + # Print welcome banner + console.print(Panel( + "[bold]Synapse App Deployment Tool[/bold]", + border_style="blue", + box=box.ROUNDED + )) + + # Check if a .deb package was provided + if len(sys.argv) < 2: + console.print("[bold red]Error:[/bold red] No .deb package specified.") + console.print(f"Usage: {sys.argv[0]} path/to/package.deb") + sys.exit(1) + + deb_package = sys.argv[1] + + # Check if the IP address was provided as a second argument + ip_address = None + if len(sys.argv) > 2: + ip_address = sys.argv[2] + + # Check if the .deb package exists + if not os.path.isfile(deb_package): + console.print(f"[bold red]Error:[/bold red] The specified .deb package does not exist: [yellow]{deb_package}[/yellow]") + sys.exit(1) + + # 
Show package info + package_size = os.path.getsize(deb_package) / (1024 * 1024) # Size in MB + console.print(f"[bold cyan]Package:[/bold cyan] [yellow]{os.path.basename(deb_package)}[/yellow] ([cyan]{package_size:.2f} MB[/cyan])") + + # Load cached credentials or use provided IP + username = None + login_password = None + root_password = None + + if ip_address is None: + ip_address, username, login_password, root_password = load_cached_credentials() + + # If no cached credentials or IP was provided but no credentials, prompt for them + if not ip_address or not username or not login_password or not root_password: + if ip_address: + console.print(f"[bold]Using target device:[/bold] [yellow]{ip_address}[/yellow]") + username = Prompt.ask("Enter login username", default="scifi") + login_password = getpass.getpass("Enter login password: ") + root_password = getpass.getpass("Enter root password for package installation: ") + else: + ip_address, username, login_password, root_password = get_credentials() + + # Try to deploy until successful + while True: + if deploy_package(ip_address, username, login_password, root_password, deb_package): + # Save successful credentials + save_credentials(ip_address, username, login_password, root_password) + break + else: + if Confirm.ask("[yellow]Would you like to retry with different credentials?[/yellow]"): + ip_address, username, login_password, root_password = get_credentials() + else: + console.print("[bold red]Deployment aborted by user.[/bold red]") + sys.exit(1) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/synapse/templates/app/deploy/requirements.txt b/synapse/templates/app/deploy/requirements.txt new file mode 100644 index 00000000..ed2e064b --- /dev/null +++ b/synapse/templates/app/deploy/requirements.txt @@ -0,0 +1,2 @@ +paramiko>=2.7.2 +rich>=10.0.0 \ No newline at end of file diff --git a/synapse/templates/app/ops/package/package.sh b/synapse/templates/app/ops/package/package.sh new file 
mode 100755 index 00000000..7c29dfaa --- /dev/null +++ b/synapse/templates/app/ops/package/package.sh @@ -0,0 +1,70 @@ +#!/bin/bash +set -e + +SYNAPSE_APP_VERSION="0.1.0" +SYNAPSE_APP_EXE="{{APP_NAME}}" + +SCRIPT_DIR=$(dirname "$0") +SOURCE_DIR="${SCRIPT_DIR}/../../" +# Check multiple possible build directories +BUILD_DIRS=( + "${SOURCE_DIR}/build-aarch64/" + "${SOURCE_DIR}/build/" + "${SOURCE_DIR}/build-arm64/" + "${SOURCE_DIR}/out/" +) + +STAGING_DIR="/tmp/synapse-package" +mkdir -p ${STAGING_DIR} + +# Binary install and setup +mkdir -p ${STAGING_DIR}/opt/scifi/bin + +# Try to find the binary in various possible build directories +BINARY_FOUND=false +for BUILD_DIR in "${BUILD_DIRS[@]}"; do + if [ -f "${BUILD_DIR}/${SYNAPSE_APP_EXE}" ]; then + echo "Found binary at ${BUILD_DIR}/${SYNAPSE_APP_EXE}" + cp "${BUILD_DIR}/${SYNAPSE_APP_EXE}" "${STAGING_DIR}/opt/scifi/bin/" + BINARY_FOUND=true + break + fi +done + +# If we didn't find the binary, try to find it anywhere in the source directory +if [ "$BINARY_FOUND" = false ]; then + echo "Binary not found in standard build directories, searching source directory..." + BINARY_PATH=$(find "${SOURCE_DIR}" -name "${SYNAPSE_APP_EXE}" -type f | grep -v "${STAGING_DIR}" | head -n 1) + + if [ -n "$BINARY_PATH" ]; then + echo "Found binary at ${BINARY_PATH}" + cp "${BINARY_PATH}" "${STAGING_DIR}/opt/scifi/bin/" + BINARY_FOUND=true + else + echo "ERROR: Could not find binary ${SYNAPSE_APP_EXE} in any build directory!" 
+ exit 1 + fi +fi + +# Launch script +mkdir -p ${STAGING_DIR}/opt/scifi/scripts +cp "${SCRIPT_DIR}/scripts/launch_synapse_app.sh" "${STAGING_DIR}/opt/scifi/scripts/" + +# Systemd service install and setup +mkdir -p ${STAGING_DIR}/etc/systemd/system +cp "${SCRIPT_DIR}/systemd/${SYNAPSE_APP_EXE}.service" "${STAGING_DIR}/etc/systemd/system/" + +fpm -s dir -t deb \ + -n "${SYNAPSE_APP_EXE}" \ + -f \ + -v "${SYNAPSE_APP_VERSION}" \ + -C ${STAGING_DIR} \ + --deb-no-default-config-files \ + --depends "systemd" \ + --vendor "Science Corporation" \ + --description "Synapse Application" \ + --architecture arm64 \ + --after-install "${SCRIPT_DIR}/scripts/postinstall.sh" \ + --before-remove "${SCRIPT_DIR}/scripts/preremove.sh" \ + --after-remove "${SCRIPT_DIR}/scripts/postremove.sh" \ + . \ No newline at end of file diff --git a/synapse/templates/app/ops/package/scripts/launch_synapse_app.sh b/synapse/templates/app/ops/package/scripts/launch_synapse_app.sh new file mode 100755 index 00000000..c058ae38 --- /dev/null +++ b/synapse/templates/app/ops/package/scripts/launch_synapse_app.sh @@ -0,0 +1,44 @@ +#!/bin/bash +# Launches app +# Will need to be run as root +# We might be able to get away with this as a normal user using CAP_SYS_NICE and CAP_SYS_RESOURCE, but we'll need to test +# TODO: this should be configurable +SYNAPSE_APP_EXE="{{APP_NAME}}" +MANIFEST_FILE="/opt/scifi/config/app_manifest.json" + +# set the process priority to something high +if ! renice -n -10 $$ > /dev/null 2>&1; then + echo "Failed to set process priority" + exit 1 +fi + +# Set CPU scheduler to FIFO +# Might drop this down if the system is unstable +if ! 
chrt -f -p 50 $$ > /dev/null 2>&1; then + echo "Failed to set CPU scheduler to FIFO" + exit 1 +fi + +# Note: Uncomment to set the CPU affinity to specific cores +# taskset -c 0-3 $$ > /dev/null 2>&1 + +# Set maximum locked memory to unlimited +# ulimit -l unlimited + +# Set max UDP write buffer size to 4MB +sysctl -w net.core.wmem_max=4194304 +sysctl -w net.core.wmem_default=4194304 + + +# Set up LD_LIBRARY_PATH to prefer our local libraries and user libraries +export LD_LIBRARY_PATH=/opt/scifi/usr-libs:/opt/scifi/lib:$LD_LIBRARY_PATH + +# Launch the server +export SCIFI_ROOT=${SCIFI_ROOT:-/opt/scifi} +PATH_TO_EXE="$SCIFI_ROOT/bin/${SYNAPSE_APP_EXE}" +if [ ! -x "${PATH_TO_EXE}" ]; then + echo "Server binary not found or not executable" >&2 + exit 1 +fi + +exec "${PATH_TO_EXE}" "$@" \ No newline at end of file diff --git a/synapse/templates/app/ops/package/scripts/postinstall.sh b/synapse/templates/app/ops/package/scripts/postinstall.sh new file mode 100755 index 00000000..f1b73e56 --- /dev/null +++ b/synapse/templates/app/ops/package/scripts/postinstall.sh @@ -0,0 +1,17 @@ +#!/bin/bash +set -e + +# App name variable that will be replaced +SYNAPSE_APP_EXE="{{APP_NAME}}" + +# Set up and reload udev rules +udevadm control --reload-rules +udevadm trigger + +# Set permissions for the executable +chown root:root /opt/scifi/bin/"${SYNAPSE_APP_EXE}" +chmod 755 /opt/scifi/bin/"${SYNAPSE_APP_EXE}" + +# Reload and start the service +systemctl daemon-reload +systemctl enable "${SYNAPSE_APP_EXE}" \ No newline at end of file diff --git a/synapse/templates/app/ops/package/scripts/postremove.sh b/synapse/templates/app/ops/package/scripts/postremove.sh new file mode 100755 index 00000000..f1f5c52b --- /dev/null +++ b/synapse/templates/app/ops/package/scripts/postremove.sh @@ -0,0 +1,5 @@ +#!/bin/bash +set -e + +# Reload systemd +systemctl daemon-reload \ No newline at end of file diff --git a/synapse/templates/app/ops/package/scripts/preremove.sh 
b/synapse/templates/app/ops/package/scripts/preremove.sh new file mode 100755 index 00000000..45986afc --- /dev/null +++ b/synapse/templates/app/ops/package/scripts/preremove.sh @@ -0,0 +1,9 @@ +#!/bin/bash +set -e + +# App name variable that will be replaced +SYNAPSE_APP_EXE="{{APP_NAME}}" + +# Stop and disable the service +systemctl stop "${SYNAPSE_APP_EXE}" || true +systemctl disable "${SYNAPSE_APP_EXE}" || true \ No newline at end of file diff --git a/synapse/templates/app/ops/package/systemd/{{APP_NAME}}.service b/synapse/templates/app/ops/package/systemd/{{APP_NAME}}.service new file mode 100644 index 00000000..c7860ab8 --- /dev/null +++ b/synapse/templates/app/ops/package/systemd/{{APP_NAME}}.service @@ -0,0 +1,27 @@ +[Unit] +Description=Synapse Application +After=network-online.target + +# We need to wait for the network to be online before starting the service +Wants=network-online.target + +# We need to wait for the udev service to be ready before starting the server +Requires=systemd-udevd.service +After=systemd-udevd.service + +# TODO: Make user run as non-root +[Service] +Type=exec +User=root +Restart=no + +ExecStart=/bin/bash -c "/opt/scifi/scripts/launch_synapse_app.sh" + +# Working directory +WorkingDirectory=/opt/scifi + +# Environment variables +Environment=SCIFI_ROOT=/opt/scifi + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/synapse/templates/app/package.sh b/synapse/templates/app/package.sh new file mode 100644 index 00000000..a61bc8b0 --- /dev/null +++ b/synapse/templates/app/package.sh @@ -0,0 +1,37 @@ +#!/bin/bash +set -e + +# Detect architecture +ARCH=$(uname -m) +if [[ "${ARCH}" == "arm64" || "${ARCH}" == "aarch64" ]]; then + TAG_SUFFIX="arm64" +else + TAG_SUFFIX="amd64" +fi + +# Get the app name from the directory name or manifest.json +APP_NAME=$(basename "$(pwd)") +if [ -f "manifest.json" ]; then + # Try to extract the name from manifest.json + if command -v jq &> /dev/null; then + MANIFEST_NAME=$(jq -r 
'.name' manifest.json 2>/dev/null) + if [ -n "$MANIFEST_NAME" ] && [ "$MANIFEST_NAME" != "null" ]; then + APP_NAME=$MANIFEST_NAME + fi + fi +fi + +# Image name +IMAGE="${APP_NAME}:latest-${TAG_SUFFIX}" + +# Check if image exists +if ! docker image inspect $IMAGE >/dev/null 2>&1; then + echo "Image $IMAGE not found. Please run build_docker.sh first." + exit 1 +fi + +docker run -it \ + --rm \ + -v "$(pwd):/home/workspace" \ + $IMAGE \ + /bin/bash -c "cd /home/workspace && ./ops/package/package.sh" \ No newline at end of file diff --git a/synapse/templates/app/setup_app.py b/synapse/templates/app/setup_app.py new file mode 100644 index 00000000..302a80aa --- /dev/null +++ b/synapse/templates/app/setup_app.py @@ -0,0 +1,261 @@ +#!/usr/bin/env python3 + +import os +import shutil +import argparse +import json +from pathlib import Path + +def setup_app_structure(app_dir, app_name): + """Setup the basic application structure""" + print(f"Setting up app structure for {app_name} in {app_dir}") + + # Create app directory if it doesn't exist + if not os.path.exists(app_dir): + os.makedirs(app_dir) + + # Get the template directory + script_dir = os.path.dirname(os.path.abspath(__file__)) + template_dir = script_dir + + # Copy necessary scripts + scripts = [ + 'build_docker.sh', + 'start_docker.sh', + 'package.sh' + ] + + for script in scripts: + source = os.path.join(template_dir, script) + destination = os.path.join(app_dir, script) + + if os.path.exists(source): + shutil.copy(source, destination) + # Make the script executable + os.chmod(destination, 0o755) + print(f"Copied {script} to {destination}") + + # Create ops directory structure + ops_dir = os.path.join(app_dir, 'ops') + os.makedirs(os.path.join(ops_dir, 'docker'), exist_ok=True) + os.makedirs(os.path.join(ops_dir, 'package'), exist_ok=True) + os.makedirs(os.path.join(ops_dir, 'package', 'scripts'), exist_ok=True) + os.makedirs(os.path.join(ops_dir, 'package', 'systemd'), exist_ok=True) + + # Create deploy directory 
and copy deploy script + deploy_dir = os.path.join(app_dir, 'deploy') + os.makedirs(deploy_dir, exist_ok=True) + + # Copy deploy scripts + deploy_source = os.path.join(template_dir, 'deploy') + for item in os.listdir(deploy_source): + source_item = os.path.join(deploy_source, item) + dest_item = os.path.join(deploy_dir, item) + + if os.path.isfile(source_item): + shutil.copy(source_item, dest_item) + # Make scripts executable + if item.endswith('.py') or item.endswith('.sh'): + os.chmod(dest_item, 0o755) + print(f"Copied {item} to {dest_item}") + + # Create basic package script + package_script = os.path.join(ops_dir, 'package', 'package.sh') + with open(package_script, 'w') as f: + f.write(f'''#!/bin/bash + +SYNAPSE_APP_VERSION="0.1.0" +SYNAPSE_APP_EXE="{app_name}" + +SCRIPT_DIR=$(dirname "$0") +SOURCE_DIR="${{SCRIPT_DIR}}/../../" +BUILD_DIR="${{SOURCE_DIR}}/build-aarch64/" + +STAGING_DIR="/tmp/synapse-package" +mkdir -p ${{STAGING_DIR}} + +# Binary install and setup +# TODO: Decide if there is a better place to put this +mkdir -p ${{STAGING_DIR}}/opt/scifi/bin +cp "${{BUILD_DIR}}/${{SYNAPSE_APP_EXE}}" "${{STAGING_DIR}}/opt/scifi/bin/" + +# Launch script +mkdir -p ${{STAGING_DIR}}/opt/scifi/scripts +cp "${{SCRIPT_DIR}}/scripts/launch_app.sh" "${{STAGING_DIR}}/opt/scifi/scripts/" + +# Systemd service install and setup +mkdir -p ${{STAGING_DIR}}/etc/systemd/system +cp "${{SCRIPT_DIR}}/systemd/{app_name}.service" "${{STAGING_DIR}}/etc/systemd/system/" + +fpm -s dir -t deb \\ + -n "${{SYNAPSE_APP_EXE}}" \\ + -f \\ + -v "${{SYNAPSE_APP_VERSION}}" \\ + -C ${{STAGING_DIR}} \\ + --deb-no-default-config-files \\ + --depends "systemd" \\ + --vendor "Science Corporation" \\ + --description "Synapse Application" \\ + --architecture arm64 \\ + --after-install "${{SCRIPT_DIR}}/scripts/postinstall.sh" \\ + --before-remove "${{SCRIPT_DIR}}/scripts/preremove.sh" \\ + --after-remove "${{SCRIPT_DIR}}/scripts/postremove.sh" \\ + . 
+''') + os.chmod(package_script, 0o755) + + # Create basic systemd service file + service_file = os.path.join(ops_dir, 'package', 'systemd', f'{app_name}.service') + with open(service_file, 'w') as f: + f.write(f'''[Unit] +Description={app_name} service +After=network.target + +[Service] +ExecStart=/opt/scifi/scripts/launch_app.sh +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +''') + + # Create launch script + launch_script = os.path.join(ops_dir, 'package', 'scripts', 'launch_app.sh') + with open(launch_script, 'w') as f: + f.write(f'''#!/bin/bash +set -e + +# Launch the application +/opt/scifi/bin/{app_name} +''') + os.chmod(launch_script, 0o755) + + # Create post-install script + postinstall_script = os.path.join(ops_dir, 'package', 'scripts', 'postinstall.sh') + with open(postinstall_script, 'w') as f: + f.write(f'''#!/bin/bash +set -e + +# Enable and start the service +systemctl daemon-reload +systemctl enable {app_name}.service +systemctl start {app_name}.service + +echo "{app_name} installed and started successfully" +''') + os.chmod(postinstall_script, 0o755) + + # Create pre-remove script + preremove_script = os.path.join(ops_dir, 'package', 'scripts', 'preremove.sh') + with open(preremove_script, 'w') as f: + f.write(f'''#!/bin/bash +set -e + +# Stop and disable the service +systemctl stop {app_name}.service +systemctl disable {app_name}.service + +echo "Stopped {app_name} service" +''') + os.chmod(preremove_script, 0o755) + + # Create post-remove script + postremove_script = os.path.join(ops_dir, 'package', 'scripts', 'postremove.sh') + with open(postremove_script, 'w') as f: + f.write(f'''#!/bin/bash +set -e + +# Reload systemd to remove the service +systemctl daemon-reload + +echo "Removed {app_name} service" +''') + os.chmod(postremove_script, 0o755) + + # Create basic Dockerfiles + dockerfile = os.path.join(ops_dir, 'docker', 'Dockerfile') + with open(dockerfile, 'w') as f: + f.write('''FROM ubuntu:22.04 + +ARG 
DEBIAN_FRONTEND=noninteractive + +# Install base dependencies +RUN apt-get update && apt-get install -y \\ + build-essential \\ + cmake \\ + pkg-config \\ + git \\ + ruby-dev \\ + curl \\ + jq \\ + && gem install fpm + +# Add a non-root user +RUN useradd -ms /bin/bash developer +USER developer +WORKDIR /home/workspace + +CMD ["/bin/bash"] +''') + + dockerfile_arm64 = os.path.join(ops_dir, 'docker', 'Dockerfile.arm64') + with open(dockerfile_arm64, 'w') as f: + f.write('''FROM ubuntu:22.04 + +ARG DEBIAN_FRONTEND=noninteractive + +# Install base dependencies +RUN apt-get update && apt-get install -y \\ + build-essential \\ + cmake \\ + pkg-config \\ + git \\ + ruby-dev \\ + curl \\ + jq \\ + && gem install fpm + +# Add a non-root user +RUN useradd -ms /bin/bash developer +USER developer +WORKDIR /home/workspace + +CMD ["/bin/bash"] +''') + + # Create manifest.json if it doesn't exist + manifest_path = os.path.join(app_dir, 'manifest.json') + if not os.path.exists(manifest_path): + manifest = { + "name": app_name, + "device_configuration": { + "nodes": [ + { + "type": "kApplicationNode", + "id": 2, + "application": { + "name": app_name + } + } + ] + } + } + + with open(manifest_path, 'w') as f: + json.dump(manifest, f, indent=2) + + print(f"Created basic manifest.json for {app_name}") + +def main(): + parser = argparse.ArgumentParser(description="Setup a new Synapse application structure") + parser.add_argument("app_name", help="Name of the application") + parser.add_argument("--app_dir", default=os.getcwd(), help="Directory to create the application in (default: current dir)") + + args = parser.parse_args() + + setup_app_structure(args.app_dir, args.app_name) + print(f"Application {args.app_name} setup complete!") + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/synapse/templates/app/start_docker.sh b/synapse/templates/app/start_docker.sh new file mode 100644 index 00000000..34a82619 --- /dev/null +++ 
b/synapse/templates/app/start_docker.sh @@ -0,0 +1,40 @@ +#!/bin/bash +set -e + +# Detect architecture +ARCH=$(uname -m) +if [[ "${ARCH}" == "arm64" || "${ARCH}" == "aarch64" ]]; then + TAG_SUFFIX="arm64" +else + TAG_SUFFIX="amd64" +fi + +# Get the app name from the directory name or manifest.json +APP_NAME=$(basename "$(pwd)") +if [ -f "manifest.json" ]; then + # Try to extract the name from manifest.json + if command -v jq &> /dev/null; then + MANIFEST_NAME=$(jq -r '.name' manifest.json 2>/dev/null) + if [ -n "$MANIFEST_NAME" ] && [ "$MANIFEST_NAME" != "null" ]; then + APP_NAME=$MANIFEST_NAME + fi + fi +fi + +# Image name +IMAGE="${APP_NAME}:latest-${TAG_SUFFIX}" + +# Check if image exists +if ! docker image inspect $IMAGE >/dev/null 2>&1; then + echo "Image $IMAGE not found. Please run build_docker.sh first." + exit 1 +fi + +echo "Starting container for architecture: $ARCH" + +# Run the container with appropriate mounts +# Adjust volume mappings as needed for your project +docker run -it \ + --rm \ + -v "$(pwd):/home/workspace" \ + $IMAGE \ No newline at end of file From 604723db0032e3f81ae35a908f939b190a5d863f Mon Sep 17 00:00:00 2001 From: Emma Zhou Date: Wed, 7 May 2025 21:37:49 -0700 Subject: [PATCH 02/25] fix broken bash command --- synapse/cli/deploy.py | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/synapse/cli/deploy.py b/synapse/cli/deploy.py index 09e1c0ad..f1bc7667 100644 --- a/synapse/cli/deploy.py +++ b/synapse/cli/deploy.py @@ -220,6 +220,7 @@ def package_app(app_dir): image = f"{os.path.basename(app_dir)}:latest-{tag_suffix}" # Run the packaging script in Docker - capture output + print(f"Running packaging script in Docker: {image}") cmd = [ 'docker', 'run', '-it', '--rm', '-v', f"{os.path.abspath(app_dir)}:/home/workspace", @@ -615,25 +616,25 @@ def build_app(app_dir, app_name): 'docker', 'run', '--rm', '-v', f"{os.path.abspath(app_dir)}:/home/workspace", image, - '/bin/bash', '-c', "cd 
/home/workspace && \ - if [ -f CMakePresets.json ]; then \ - # Use the existing presets if available\ - echo 'Using existing CMake presets...' && \ - cmake --preset=dynamic-aarch64 && \ - cmake --build --preset=cross-release -j$(nproc); \ - else \ - # Fall back to manual configuration\ - echo 'No CMake presets found, using manual configuration...' && \ - export VCPKG_DEFAULT_TRIPLET=arm64-linux-dynamic-release && \ + '/bin/bash', '-c', """cd /home/workspace && + if [ -f CMakePresets.json ]; then + # Use the existing presets if available + echo 'Using existing CMake presets...' && + cmake --preset=dynamic-aarch64 && + cmake --build --preset=cross-release -j$(nproc); + else + # Fall back to manual configuration + echo 'No CMake presets found, using manual configuration...' && + export VCPKG_DEFAULT_TRIPLET=arm64-linux-dynamic-release && cmake -B build -S . \ -DCMAKE_TOOLCHAIN_FILE=${VCPKG_ROOT}/scripts/buildsystems/vcpkg.cmake \ -DVCPKG_TARGET_TRIPLET=arm64-linux-dynamic-release \ -DVCPKG_INSTALLED_DIR=${VCPKG_ROOT}/vcpkg_installed \ -DBUILD_SHARED_LIBS=ON \ -DCMAKE_BUILD_TYPE=Release \ - -DBUILD_FOR_ARM64=ON && \ - cmake --build build -j$(nproc); \ - fi" + -DBUILD_FOR_ARM64=ON && + cmake --build build -j$(nproc); + fi""" ] try: From c359bca04d763df5fdf2c0d1bf18dd3bef310fce Mon Sep 17 00:00:00 2001 From: Gilbert Montague Date: Wed, 7 May 2025 21:41:10 -0700 Subject: [PATCH 03/25] fixed triplet --- synapse/cli/deploy.py | 769 ++++++++++++++++++++++++++---------------- 1 file changed, 478 insertions(+), 291 deletions(-) diff --git a/synapse/cli/deploy.py b/synapse/cli/deploy.py index f1bc7667..81b6d186 100644 --- a/synapse/cli/deploy.py +++ b/synapse/cli/deploy.py @@ -1,16 +1,19 @@ -import argparse import os import sys import subprocess import shutil import json -import tempfile import time import logging -from pathlib import Path from rich.console import Console from rich.panel import Panel -from rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn, 
TimeElapsedColumn +from rich.progress import ( + Progress, + SpinnerColumn, + TextColumn, + BarColumn, + TimeElapsedColumn, +) from rich.prompt import Prompt from rich import box @@ -21,66 +24,78 @@ # Configure logging for paramiko to be less verbose logging.getLogger("paramiko").setLevel(logging.WARNING) + def validate_manifest(manifest_path): """Validate the manifest file exists and has required properties""" try: - with open(manifest_path, 'r') as f: + with open(manifest_path, "r") as f: manifest = json.load(f) - + # Basic validation - if 'name' not in manifest: - console.print(f"[bold red]Error:[/bold red] manifest.json is missing required 'name' property") + if "name" not in manifest: + console.print( + "[bold red]Error:[/bold red] manifest.json is missing required 'name' property" + ) return False - + return manifest except FileNotFoundError: - console.print(f"[bold red]Error:[/bold red] manifest.json not found in {manifest_path}") + console.print( + f"[bold red]Error:[/bold red] manifest.json not found in {manifest_path}" + ) return False except json.JSONDecodeError: - console.print(f"[bold red]Error:[/bold red] manifest.json is not valid JSON") + console.print("[bold red]Error:[/bold red] manifest.json is not valid JSON") return False + def replace_app_name_in_scripts(app_dir, app_name): """Replace the app name in packaging scripts""" - package_script = os.path.join(app_dir, 'ops', 'package', 'package.sh') - + package_script = os.path.join(app_dir, "ops", "package", "package.sh") + # List of scripts that need app name replacement script_paths = [ - os.path.join(app_dir, 'ops', 'package', 'package.sh'), - os.path.join(app_dir, 'ops', 'package', 'scripts', 'postinstall.sh'), - os.path.join(app_dir, 'ops', 'package', 'scripts', 'preremove.sh'), - os.path.join(app_dir, 'ops', 'package', 'scripts', 'launch_synapse_app.sh') + os.path.join(app_dir, "ops", "package", "package.sh"), + os.path.join(app_dir, "ops", "package", "scripts", "postinstall.sh"), + 
os.path.join(app_dir, "ops", "package", "scripts", "preremove.sh"), + os.path.join(app_dir, "ops", "package", "scripts", "launch_synapse_app.sh"), ] - + # Replace template variables in each script for script_path in script_paths: if os.path.exists(script_path): - with open(script_path, 'r') as f: + with open(script_path, "r") as f: content = f.read() - + # Replace the app name in the script - content = content.replace('{{APP_NAME}}', app_name) - content = content.replace('SYNAPSE_APP_EXE="synapse-example-app"', f'SYNAPSE_APP_EXE="{app_name}"') - - with open(script_path, 'w') as f: + content = content.replace("{{APP_NAME}}", app_name) + content = content.replace( + 'SYNAPSE_APP_EXE="synapse-example-app"', f'SYNAPSE_APP_EXE="{app_name}"' + ) + + with open(script_path, "w") as f: f.write(content) - + # Handle the systemd service file specially - it needs to be renamed - systemd_template = os.path.join(app_dir, 'ops', 'package', 'systemd', '{{APP_NAME}}.service') - systemd_target = os.path.join(app_dir, 'ops', 'package', 'systemd', f'{app_name}.service') - + systemd_template = os.path.join( + app_dir, "ops", "package", "systemd", "{{APP_NAME}}.service" + ) + systemd_target = os.path.join( + app_dir, "ops", "package", "systemd", f"{app_name}.service" + ) + if os.path.exists(systemd_template): # Read template content - with open(systemd_template, 'r') as f: + with open(systemd_template, "r") as f: content = f.read() - + # Replace template variables - content = content.replace('{{APP_NAME}}', app_name) - + content = content.replace("{{APP_NAME}}", app_name) + # Write to the new file - with open(systemd_target, 'w') as f: + with open(systemd_target, "w") as f: f.write(content) - + # Remove the template file if it's different from the target if systemd_template != systemd_target: try: @@ -88,24 +103,27 @@ def replace_app_name_in_scripts(app_dir, app_name): except: pass + def package_app(app_dir): """Package the application into a .deb file""" # Check if we're in a Docker 
container - if os.path.exists('/.dockerenv'): + if os.path.exists("/.dockerenv"): # We're inside Docker, directly run the package script - package_script = os.path.join(app_dir, 'ops', 'package', 'package.sh') + package_script = os.path.join(app_dir, "ops", "package", "package.sh") if not os.path.exists(package_script): - console.print(f"[bold red]Error:[/bold red] Package script not found at {package_script}") + console.print( + f"[bold red]Error:[/bold red] Package script not found at {package_script}" + ) return False # Make sure the script is executable os.chmod(package_script, 0o755) - + # Make sure all the other scripts are executable too - script_dir = os.path.join(app_dir, 'ops', 'package', 'scripts') + script_dir = os.path.join(app_dir, "ops", "package", "scripts") if os.path.exists(script_dir): for script in os.listdir(script_dir): - if script.endswith('.sh'): + if script.endswith(".sh"): script_path = os.path.join(script_dir, script) os.chmod(script_path, 0o755) @@ -114,218 +132,285 @@ def package_app(app_dir): TextColumn("[bold blue]{task.description}[/bold blue]"), BarColumn(), TimeElapsedColumn(), - console=console + console=console, ) as progress: task = progress.add_task("[yellow]Packaging application...", total=1) - + # Run the package script try: - subprocess.run(['bash', package_script], check=True, cwd=app_dir) + subprocess.run(["bash", package_script], check=True, cwd=app_dir) progress.update(task, advance=1) return True except subprocess.CalledProcessError as e: - console.print(f"[bold red]Error:[/bold red] Failed to package application: {e}") + console.print( + f"[bold red]Error:[/bold red] Failed to package application: {e}" + ) return False else: # We're outside Docker, need to use docker to package - script_path = os.path.join(app_dir, 'build_docker.sh') - + script_path = os.path.join(app_dir, "build_docker.sh") + # Check if build_docker.sh exists in app_dir if not os.path.exists(script_path): # Use the one from synapse-python instead 
script_dir = os.path.dirname(os.path.abspath(__file__)) - template_dir = os.path.join(script_dir, '..', 'templates', 'app') - script_path = os.path.join(template_dir, 'build_docker.sh') - + template_dir = os.path.join(script_dir, "..", "templates", "app") + script_path = os.path.join(template_dir, "build_docker.sh") + if not os.path.exists(script_path): - console.print(f"[bold red]Error:[/bold red] Docker build script not found") + console.print( + "[bold red]Error:[/bold red] Docker build script not found" + ) return False - + # Copy the script to the app directory - shutil.copy(script_path, os.path.join(app_dir, 'build_docker.sh')) - script_path = os.path.join(app_dir, 'build_docker.sh') - + shutil.copy(script_path, os.path.join(app_dir, "build_docker.sh")) + script_path = os.path.join(app_dir, "build_docker.sh") + # Also check if the ops directory exists, if not, copy template files - ops_dir = os.path.join(app_dir, 'ops') - if not os.path.exists(ops_dir) or not os.path.exists(os.path.join(ops_dir, 'package', 'package.sh')): + ops_dir = os.path.join(app_dir, "ops") + if not os.path.exists(ops_dir) or not os.path.exists( + os.path.join(ops_dir, "package", "package.sh") + ): # Create the ops directory structure - os.makedirs(os.path.join(app_dir, 'ops', 'package', 'scripts'), exist_ok=True) - os.makedirs(os.path.join(app_dir, 'ops', 'package', 'systemd'), exist_ok=True) - + os.makedirs( + os.path.join(app_dir, "ops", "package", "scripts"), exist_ok=True + ) + os.makedirs( + os.path.join(app_dir, "ops", "package", "systemd"), exist_ok=True + ) + # Copy template files from synapse-python - template_ops_dir = os.path.join(template_dir, 'ops') + template_ops_dir = os.path.join(template_dir, "ops") if os.path.exists(template_ops_dir): # Copy package.sh - package_sh = os.path.join(template_ops_dir, 'package', 'package.sh') + package_sh = os.path.join(template_ops_dir, "package", "package.sh") if os.path.exists(package_sh): - shutil.copy(package_sh, os.path.join(app_dir, 
'ops', 'package', 'package.sh')) - + shutil.copy( + package_sh, + os.path.join(app_dir, "ops", "package", "package.sh"), + ) + # Copy scripts - scripts_dir = os.path.join(template_ops_dir, 'package', 'scripts') + scripts_dir = os.path.join(template_ops_dir, "package", "scripts") if os.path.exists(scripts_dir): for script in os.listdir(scripts_dir): src = os.path.join(scripts_dir, script) - dst = os.path.join(app_dir, 'ops', 'package', 'scripts', script) + dst = os.path.join( + app_dir, "ops", "package", "scripts", script + ) shutil.copy(src, dst) - + # Copy systemd files - systemd_dir = os.path.join(template_ops_dir, 'package', 'systemd') + systemd_dir = os.path.join(template_ops_dir, "package", "systemd") if os.path.exists(systemd_dir): for file in os.listdir(systemd_dir): src = os.path.join(systemd_dir, file) - dst = os.path.join(app_dir, 'ops', 'package', 'systemd', file) + dst = os.path.join( + app_dir, "ops", "package", "systemd", file + ) shutil.copy(src, dst) # Make sure the script is executable os.chmod(script_path, 0o755) - + with Progress( SpinnerColumn(), TextColumn("[bold blue]{task.description}[/bold blue]"), BarColumn(), TimeElapsedColumn(), - console=console + console=console, ) as progress: build_task = progress.add_task("[yellow]Building Docker image...", total=1) - + # Build the Docker image - capture output to prevent interference with progress try: # Use capture_output to prevent Docker output from interfering with progress bars - result = subprocess.run(['bash', script_path], check=True, cwd=app_dir, - capture_output=True, text=True) - + result = subprocess.run( + ["bash", script_path], + check=True, + cwd=app_dir, + capture_output=True, + text=True, + ) + # Only log errors if they occur, otherwise just update progress - if result.stderr and ("error" in result.stderr.lower() or "fail" in result.stderr.lower()): + if result.stderr and ( + "error" in result.stderr.lower() or "fail" in result.stderr.lower() + ): console.print(f"[bold 
red]Warning:[/bold red] {result.stderr}") - + progress.update(build_task, advance=1) except subprocess.CalledProcessError as e: - console.print(f"[bold red]Error:[/bold red] Failed to build Docker image: {e}") + console.print( + f"[bold red]Error:[/bold red] Failed to build Docker image: {e}" + ) if e.stderr: console.print(f"[red]{e.stderr}[/red]") return False - - package_task = progress.add_task("[yellow]Packaging application...", total=1) - + + package_task = progress.add_task( + "[yellow]Packaging application...", total=1 + ) + # Now run package.sh in the Docker container try: # Detect architecture - arch = subprocess.check_output(['uname', '-m']).decode('utf-8').strip() - if arch in ['arm64', 'aarch64']: - tag_suffix = 'arm64' + arch = subprocess.check_output(["uname", "-m"]).decode("utf-8").strip() + if arch in ["arm64", "aarch64"]: + tag_suffix = "arm64" else: - tag_suffix = 'amd64' - + tag_suffix = "amd64" + # Image name image = f"{os.path.basename(app_dir)}:latest-{tag_suffix}" - + # Run the packaging script in Docker - capture output print(f"Running packaging script in Docker: {image}") cmd = [ - 'docker', 'run', '-it', '--rm', - '-v', f"{os.path.abspath(app_dir)}:/home/workspace", + "docker", + "run", + "-it", + "--rm", + "-v", + f"{os.path.abspath(app_dir)}:/home/workspace", image, - '/bin/bash', '-c', "cd /home/workspace && ./ops/package/package.sh" + "/bin/bash", + "-c", + "cd /home/workspace && ./ops/package/package.sh", ] - + # Capture output to prevent it from interfering with progress bars - result = subprocess.run(cmd, check=True, cwd=app_dir, - capture_output=True, text=True) - + result = subprocess.run( + cmd, check=True, cwd=app_dir, capture_output=True, text=True + ) + # Only log errors if they occur - if result.stderr and ("error" in result.stderr.lower() or "fail" in result.stderr.lower()): + if result.stderr and ( + "error" in result.stderr.lower() or "fail" in result.stderr.lower() + ): console.print(f"[bold red]Warning:[/bold red] 
{result.stderr}") - + progress.update(package_task, advance=1) - + # Display success message after completion console.print("[green]Package created successfully![/green]") return True except subprocess.CalledProcessError as e: - console.print(f"[bold red]Error:[/bold red] Failed to package application: {e}") + console.print( + f"[bold red]Error:[/bold red] Failed to package application: {e}" + ) if e.stderr: console.print(f"[red]{e.stderr}[/red]") return False + def find_deb_package(app_dir): """Find the generated .deb package in the app directory""" for file in os.listdir(app_dir): - if file.endswith('.deb'): + if file.endswith(".deb"): return os.path.join(app_dir, file) - - console.print(f"[bold red]Error:[/bold red] Could not find .deb package in {app_dir}") + + console.print( + f"[bold red]Error:[/bold red] Could not find .deb package in {app_dir}" + ) return None + def get_device_credentials(ip_address): """Get user credentials with clear prompts""" console.print() - console.print(Panel( - f"[bold yellow]Device Connection Details[/bold yellow]\n[white]Target device:[/white] [green]{ip_address}[/green]", - border_style="blue" - )) - + console.print( + Panel( + f"[bold yellow]Device Connection Details[/bold yellow]\n[white]Target device:[/white] [green]{ip_address}[/green]", + border_style="blue", + ) + ) + username = Prompt.ask("Enter login username", default="scifi") - + import getpass - console.print("[bold blue]Enter login password (input will be hidden):[/bold blue]", end=" ") + + console.print( + "[bold blue]Enter login password (input will be hidden):[/bold blue]", end=" " + ) login_password = getpass.getpass("") - - console.print("[bold blue]Enter root password for package installation (input will be hidden):[/bold blue]", end=" ") + + console.print( + "[bold blue]Enter root password for package installation (input will be hidden):[/bold blue]", + end=" ", + ) root_password = getpass.getpass("") - + console.print() return username, login_password, 
root_password + def deploy_package(ip_address, deb_package_path): """Deploy the package to the device""" package_filename = os.path.basename(deb_package_path) - + # Stop any previous progress display console.clear_live() - + # Get cached credentials or prompt for new ones cached_ip, username, login_password, root_password = load_cached_credentials() - + # If no cached credentials or they don't match our target IP, prompt for new ones - if not cached_ip or cached_ip != ip_address or not username or not login_password or not root_password: + if ( + not cached_ip + or cached_ip != ip_address + or not username + or not login_password + or not root_password + ): username, login_password, root_password = get_device_credentials(ip_address) - + with Progress( SpinnerColumn(), TextColumn("[bold blue]{task.description}[/bold blue]"), BarColumn(), TimeElapsedColumn(), - console=console + console=console, ) as progress: - deploy_task = progress.add_task(f"[yellow]Deploying to {ip_address}...", total=3) - + deploy_task = progress.add_task( + f"[yellow]Deploying to {ip_address}...", total=3 + ) + try: # Deploy directly using paramiko client = None sftp = None shell = None - + # Create SSH client import paramiko + client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - + # Connect to the device (connection task) connect_task = progress.add_task("[green]Connecting to device...", total=1) - + try: - client.connect(ip_address, username=username, password=login_password, timeout=10) + client.connect( + ip_address, username=username, password=login_password, timeout=10 + ) progress.update(connect_task, completed=1) progress.update(deploy_task, advance=1) except Exception as e: progress.update(connect_task, visible=False) - console.print(f"[bold red]Error connecting to {ip_address}:[/bold red] {str(e)}") - console.print("[yellow]Please check your username and password.[/yellow]") + console.print( + f"[bold red]Error connecting to {ip_address}:[/bold 
red] {str(e)}" + ) + console.print( + "[yellow]Please check your username and password.[/yellow]" + ) return False - + # Upload file task upload_task = progress.add_task("[cyan]Uploading package...", total=1) - + try: # Create SFTP client and upload sftp = client.open_sftp() @@ -337,75 +422,81 @@ def deploy_package(ip_address, deb_package_path): progress.update(upload_task, visible=False) console.print(f"[bold red]Error uploading package:[/bold red] {str(e)}") return False - + # Install task install_task = progress.add_task("[magenta]Installing package...", total=1) - + try: # Use expect-like behavior with Paramiko to handle su shell = client.invoke_shell() - + # Set up a way to collect output output = "" - + # Send su command shell.send("su -\n") time.sleep(1) # Wait for password prompt - + # Send root password shell.send(f"{root_password}\n") time.sleep(1) # Wait for su to authenticate - + # Send dpkg command shell.send(f"dpkg -i {remote_path}\n") time.sleep(5) # Give dpkg time to run - + # Exit from root shell shell.send("exit\n") time.sleep(0.5) - + # Collect the final output while shell.recv_ready(): - chunk = shell.recv(4096).decode('utf-8') + chunk = shell.recv(4096).decode("utf-8") output += chunk - + # Check for common error indicators if "error" in output.lower() or "failed" in output.lower(): progress.update(install_task, visible=False) progress.update(deploy_task, visible=False) - console.print(Panel( - f"[bold red]Installation Error[/bold red]\n\n{output}", - title="Deployment Failed", - border_style="red", - box=box.DOUBLE - )) + console.print( + Panel( + f"[bold red]Installation Error[/bold red]\n\n{output}", + title="Deployment Failed", + border_style="red", + box=box.DOUBLE, + ) + ) return False - + # Cleanup shell.send(f"rm {remote_path}\n") time.sleep(0.5) - + # Mark installation as complete progress.update(install_task, completed=1) progress.update(deploy_task, advance=1) - + # Save successful credentials save_credentials(ip_address, username, 
login_password, root_password) - - console.print(Panel( - f"[bold green]Successfully deployed[/bold green] [yellow]{package_filename}[/yellow] [bold green]to[/bold green] [blue]{ip_address}[/blue]", - title="Deployment Successful", - border_style="green", - box=box.DOUBLE - )) + + console.print( + Panel( + f"[bold green]Successfully deployed[/bold green] [yellow]{package_filename}[/yellow] [bold green]to[/bold green] [blue]{ip_address}[/blue]", + title="Deployment Successful", + border_style="green", + box=box.DOUBLE, + ) + ) return True - + except Exception as e: progress.update(install_task, visible=False) progress.update(deploy_task, visible=False) - console.print(f"[bold red]Error during installation:[/bold red] {str(e)}") + console.print( + f"[bold red]Error during installation:[/bold red] {str(e)}" + ) return False - + except Exception as e: progress.update(deploy_task, visible=False) console.print(f"[bold red]Error:[/bold red] Failed to deploy package: {e}") @@ -422,118 +513,152 @@ def deploy_package(ip_address, deb_package_path): except: pass + def load_cached_credentials(): """Load cached credentials from the config file""" cache_file = ".synapse_deploy_cache.json" try: if os.path.exists(cache_file): - with open(cache_file, 'r') as f: + with open(cache_file, "r") as f: data = json.load(f) - ip_address = data.get('ip_address') - username = data.get('username', 'scifi') - encoded_login_password = data.get('encoded_login_password') - encoded_root_password = data.get('encoded_root_password') - + ip_address = data.get("ip_address") + username = data.get("username", "scifi") + encoded_login_password = data.get("encoded_login_password") + encoded_root_password = data.get("encoded_root_password") + if encoded_login_password and encoded_root_password: import base64 - login_password = base64.b64decode(encoded_login_password).decode('utf-8') - root_password = base64.b64decode(encoded_root_password).decode('utf-8') - console.print(f"[green]Using cached credentials for 
[bold]{username}@{ip_address}[/bold][/green]") + + login_password = base64.b64decode(encoded_login_password).decode( + "utf-8" + ) + root_password = base64.b64decode(encoded_root_password).decode( + "utf-8" + ) + console.print( + f"[green]Using cached credentials for [bold]{username}@{ip_address}[/bold][/green]" + ) return ip_address, username, login_password, root_password except Exception as e: - console.print(f"[yellow]Warning: Failed to load cached credentials: {e}[/yellow]") + console.print( + f"[yellow]Warning: Failed to load cached credentials: {e}[/yellow]" + ) return None, None, None, None + def save_credentials(ip_address, username, login_password, root_password): """Save credentials to cache file""" cache_file = ".synapse_deploy_cache.json" try: import base64 - with open(cache_file, 'w') as f: + + with open(cache_file, "w") as f: data = { - 'ip_address': ip_address, - 'username': username, - 'encoded_login_password': base64.b64encode(login_password.encode('utf-8')).decode('utf-8'), - 'encoded_root_password': base64.b64encode(root_password.encode('utf-8')).decode('utf-8') + "ip_address": ip_address, + "username": username, + "encoded_login_password": base64.b64encode( + login_password.encode("utf-8") + ).decode("utf-8"), + "encoded_root_password": base64.b64encode( + root_password.encode("utf-8") + ).decode("utf-8"), } json.dump(data, f) os.chmod(cache_file, 0o600) # Restrict file permissions except Exception as e: console.print(f"[yellow]Warning: Failed to save credentials: {e}[/yellow]") + def start_app(ip_address, app_name): """Start the application on the device""" # Stop any previous progress display console.clear_live() - + # Get cached credentials or prompt for new ones cached_ip, username, login_password, root_password = load_cached_credentials() - + # If no cached credentials or they don't match our target IP, prompt for new ones - if not cached_ip or cached_ip != ip_address or not username or not login_password or not root_password: + if ( + not 
cached_ip + or cached_ip != ip_address + or not username + or not login_password + or not root_password + ): username, login_password, root_password = get_device_credentials(ip_address) - + with Progress( SpinnerColumn(), TextColumn("[bold blue]{task.description}[/bold blue]"), - console=console + console=console, ) as progress: - task = progress.add_task(f"[yellow]Starting {app_name} on {ip_address}...", total=1) - + task = progress.add_task( + f"[yellow]Starting {app_name} on {ip_address}...", total=1 + ) + try: # Start the app using paramiko import paramiko + client = None shell = None - + try: # Create SSH client client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - + # Connect to the device - client.connect(ip_address, username=username, password=login_password, timeout=10) - + client.connect( + ip_address, username=username, password=login_password, timeout=10 + ) + # Use interactive shell to run commands with sudo shell = client.invoke_shell() - + # Send the command to start the application shell.send(f"sudo systemctl start {app_name}.service\n") time.sleep(2) # Wait for the command to execute - + # Collect output to check for errors output = "" while shell.recv_ready(): - chunk = shell.recv(4096).decode('utf-8') + chunk = shell.recv(4096).decode("utf-8") output += chunk - + # If there's an error, we'll usually see it in the output if "error" in output.lower() or "failed" in output.lower(): progress.update(task, visible=False) - console.print(f"[bold red]Error:[/bold red] Failed to start application:\n{output}") + console.print( + f"[bold red]Error:[/bold red] Failed to start application:\n{output}" + ) return False - + # Save successful credentials save_credentials(ip_address, username, login_password, root_password) - + progress.update(task, advance=1) - - console.print(Panel( - f"[bold green]Successfully started[/bold green] [yellow]{app_name}[/yellow] [bold green]on[/bold green] [blue]{ip_address}[/blue]", - 
title="Application Started", - border_style="green", - box=box.DOUBLE - )) + + console.print( + Panel( + f"[bold green]Successfully started[/bold green] [yellow]{app_name}[/yellow] [bold green]on[/bold green] [blue]{ip_address}[/blue]", + title="Application Started", + border_style="green", + box=box.DOUBLE, + ) + ) return True - + except Exception as e: progress.update(task, visible=False) console.print(f"[bold red]Error:[/bold red] {str(e)}") return False - + except Exception as e: progress.update(task, visible=False) - console.print(f"[bold red]Error:[/bold red] Failed to start application: {e}") + console.print( + f"[bold red]Error:[/bold red] Failed to start application: {e}" + ) return False finally: # Clean up connections @@ -545,224 +670,286 @@ def start_app(ip_address, app_name): except: pass + def build_app(app_dir, app_name): """Build the application binary before packaging""" console.print(f"[yellow]Building application: {app_name}...[/yellow]") - + # Check if binary already exists binary_paths = [ - os.path.join(app_dir, 'build-aarch64', app_name), - os.path.join(app_dir, 'build', app_name), - os.path.join(app_dir, 'build-arm64', app_name), - os.path.join(app_dir, 'out', app_name) + os.path.join(app_dir, "build-aarch64", app_name), + os.path.join(app_dir, "build", app_name), + os.path.join(app_dir, "build-arm64", app_name), + os.path.join(app_dir, "out", app_name), ] - + for path in binary_paths: if os.path.exists(path): console.print(f"[green]Binary already exists at: {path}[/green]") return True - + # Binary doesn't exist, build it console.print("[yellow]Binary not found, attempting to build...[/yellow]") - + # Detect architecture - arch = subprocess.check_output(['uname', '-m']).decode('utf-8').strip() - if arch in ['arm64', 'aarch64']: - tag_suffix = 'arm64' + arch = subprocess.check_output(["uname", "-m"]).decode("utf-8").strip() + if arch in ["arm64", "aarch64"]: + tag_suffix = "arm64" else: - tag_suffix = 'amd64' - + tag_suffix = "amd64" + # 
Image name image = f"{os.path.basename(app_dir)}:latest-{tag_suffix}" - + # Check if Docker image exists try: - subprocess.run(['docker', 'image', 'inspect', image], check=True, - stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + subprocess.run( + ["docker", "image", "inspect", image], + check=True, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) console.print(f"[green]Docker image {image} already exists.[/green]") except subprocess.CalledProcessError: # Docker image doesn't exist, build it - console.print(f"[yellow]Docker image {image} not found, building it first...[/yellow]") - build_docker_script = os.path.join(app_dir, 'build_docker.sh') + console.print( + f"[yellow]Docker image {image} not found, building it first...[/yellow]" + ) + build_docker_script = os.path.join(app_dir, "build_docker.sh") try: # Run the build script without capturing output so user can see progress - console.print(f"[blue]Running build_docker.sh...[/blue]") - subprocess.run(['bash', build_docker_script], check=True, cwd=app_dir) - console.print(f"[green]Successfully built Docker image.[/green]") + console.print("[blue]Running build_docker.sh...[/blue]") + subprocess.run(["bash", build_docker_script], check=True, cwd=app_dir) + console.print("[green]Successfully built Docker image.[/green]") except subprocess.CalledProcessError as e: - console.print(f"[bold red]Error:[/bold red] Failed to build Docker image: {e}") + console.print( + f"[bold red]Error:[/bold red] Failed to build Docker image: {e}" + ) return False - + # Now build the app in Docker console.print("[yellow]Building application in Docker container...[/yellow]") - console.print("[dim]This may take a few minutes. You'll see output during the build process.[/dim]") - + console.print( + "[dim]This may take a few minutes. 
You'll see output during the build process.[/dim]" + ) + # First, try to run vcpkg to install dependencies vcpkg_cmd = [ - 'docker', 'run', '--rm', - '-v', f"{os.path.abspath(app_dir)}:/home/workspace", + "docker", + "run", + "--rm", + "-v", + f"{os.path.abspath(app_dir)}:/home/workspace", image, - '/bin/bash', '-c', "cd /home/workspace && if [ -f vcpkg.json ]; then echo 'Installing dependencies from vcpkg.json...'; ${VCPKG_ROOT}/vcpkg install --triplet arm64-linux-dynamic-release; fi" + "/bin/bash", + "-c", + "cd /home/workspace && if [ -f vcpkg.json ]; then echo 'Installing dependencies from vcpkg.json...'; ${VCPKG_ROOT}/vcpkg install --triplet arm64-linux-dynamic-release; fi", ] - + try: console.print("[blue]Installing dependencies...[/blue]") subprocess.run(vcpkg_cmd, check=True, cwd=app_dir) - except subprocess.CalledProcessError as e: - console.print(f"[yellow]Warning: Failed to install dependencies. The build might still succeed.[/yellow]") - + except subprocess.CalledProcessError: + console.print( + "[yellow]Warning: Failed to install dependencies. The build might still succeed.[/yellow]" + ) + # Now run the actual build command with a proper CMake preset build_cmd = [ - 'docker', 'run', '--rm', - '-v', f"{os.path.abspath(app_dir)}:/home/workspace", + "docker", + "run", + "--rm", + "-v", + f"{os.path.abspath(app_dir)}:/home/workspace", image, - '/bin/bash', '-c', """cd /home/workspace && - if [ -f CMakePresets.json ]; then + "/bin/bash", + "-c", + """cd /home/workspace && + if [ -f CMakePresets.json ]; then # Use the existing presets if available - echo 'Using existing CMake presets...' && - cmake --preset=dynamic-aarch64 && - cmake --build --preset=cross-release -j$(nproc); - else + echo 'Using existing CMake presets...' 
&& + cmake --preset=dynamic-aarch64 -DVCPKG_TARGET_TRIPLET="arm64-linux-dynamic-release" && + cmake --build --preset=cross-release -j$(nproc); + else # Fall back to manual configuration - echo 'No CMake presets found, using manual configuration...' && - export VCPKG_DEFAULT_TRIPLET=arm64-linux-dynamic-release && + echo 'No CMake presets found, using manual configuration...' && + export VCPKG_DEFAULT_TRIPLET=arm64-linux-dynamic-release && cmake -B build -S . \ -DCMAKE_TOOLCHAIN_FILE=${VCPKG_ROOT}/scripts/buildsystems/vcpkg.cmake \ -DVCPKG_TARGET_TRIPLET=arm64-linux-dynamic-release \ -DVCPKG_INSTALLED_DIR=${VCPKG_ROOT}/vcpkg_installed \ -DBUILD_SHARED_LIBS=ON \ -DCMAKE_BUILD_TYPE=Release \ - -DBUILD_FOR_ARM64=ON && - cmake --build build -j$(nproc); - fi""" + -DBUILD_FOR_ARM64=ON && + cmake --build build -j$(nproc); + fi""", ] - + try: # Run without capturing output so the user can see progress console.print("[blue]Running build command...[/blue]") subprocess.run(build_cmd, check=True, cwd=app_dir) - + # Check if build succeeded for path in binary_paths: if os.path.exists(path): console.print(f"[green]Successfully built binary at: {path}[/green]") return True - + # If we get here, the build might have succeeded but we can't find the binary - console.print("[bold yellow]Warning: Build completed but binary not found in expected locations.[/bold yellow]") + console.print( + "[bold yellow]Warning: Build completed but binary not found in expected locations.[/bold yellow]" + ) # Try to find it manually binary_path = subprocess.run( - ['find', app_dir, '-type', 'f', '-name', app_name, '-not', '-path', '*/.*'], - capture_output=True, text=True, check=False + ["find", app_dir, "-type", "f", "-name", app_name, "-not", "-path", "*/.*"], + capture_output=True, + text=True, + check=False, ).stdout.strip() - + if binary_path: - binary_path = binary_path.split('\n')[0] # Take the first match if multiple + binary_path = binary_path.split("\n")[0] # Take the first match if multiple 
console.print(f"[green]Found binary at: {binary_path}[/green]") - + # Try to copy it to one of the standard locations - build_dir = os.path.join(app_dir, 'build') + build_dir = os.path.join(app_dir, "build") os.makedirs(build_dir, exist_ok=True) shutil.copy(binary_path, os.path.join(build_dir, app_name)) - console.print(f"[green]Copied binary to: {os.path.join(build_dir, app_name)}[/green]") + console.print( + f"[green]Copied binary to: {os.path.join(build_dir, app_name)}[/green]" + ) return True - + return False - except subprocess.CalledProcessError as e: - console.print(f"[bold red]Error:[/bold red] Failed to build application. Check the CMake output above for details.") + except subprocess.CalledProcessError: + console.print( + "[bold red]Error:[/bold red] Failed to build application. Check the CMake output above for details." + ) return False + def deploy_cmd(args): """Handle the deploy command""" # Check for required modules and install them if missing try: import paramiko except ImportError: - console.print("[yellow]Required module 'paramiko' not found. Attempting to install...[/yellow]") + console.print( + "[yellow]Required module 'paramiko' not found. 
Attempting to install...[/yellow]" + ) try: - subprocess.check_call([sys.executable, "-m", "pip", "install", "paramiko>=2.7.2"]) + subprocess.check_call( + [sys.executable, "-m", "pip", "install", "paramiko>=2.7.2"] + ) console.print("[green]Successfully installed paramiko.[/green]") # Re-import after installing - import paramiko except Exception as e: - console.print(f"[bold red]Error:[/bold red] Failed to install paramiko: {e}") - console.print("[yellow]Please manually install required dependencies:[/yellow]") + console.print( + f"[bold red]Error:[/bold red] Failed to install paramiko: {e}" + ) + console.print( + "[yellow]Please manually install required dependencies:[/yellow]" + ) console.print("pip install paramiko>=2.7.2") return - + # Get absolute path of app directory app_dir = os.path.abspath(args.app_dir) - + # Validate manifest.json - manifest_path = os.path.join(app_dir, 'manifest.json') + manifest_path = os.path.join(app_dir, "manifest.json") manifest = validate_manifest(manifest_path) if not manifest: return - + # Get app name from manifest - app_name = manifest['name'] + app_name = manifest["name"] console.print(f"[bold]Deploying application:[/bold] [yellow]{app_name}[/yellow]") - + # First, build the app if not build_app(app_dir, app_name): console.print("[bold red]Error:[/bold red] Failed to build the application.") return - + # Replace app name in packaging scripts replace_app_name_in_scripts(app_dir, app_name) - + # Package the app if not package_app(app_dir): return - + # Find the generated .deb package deb_package = find_deb_package(app_dir) if not deb_package: return - + # Deploy the package to the device - uri = getattr(args, 'uri', None) + uri = getattr(args, "uri", None) if uri: deploy_package(uri, deb_package) else: - console.print("[yellow]No URI provided. Package created but not deployed.[/yellow]") + console.print( + "[yellow]No URI provided. 
Package created but not deployed.[/yellow]" + ) console.print(f"[green]Package available at:[/green] {deb_package}") + def start_app_cmd(args): """Handle the start-app command""" # Check for required modules and install them if missing try: import paramiko except ImportError: - console.print("[yellow]Required module 'paramiko' not found. Attempting to install...[/yellow]") + console.print( + "[yellow]Required module 'paramiko' not found. Attempting to install...[/yellow]" + ) try: - subprocess.check_call([sys.executable, "-m", "pip", "install", "paramiko>=2.7.2"]) + subprocess.check_call( + [sys.executable, "-m", "pip", "install", "paramiko>=2.7.2"] + ) console.print("[green]Successfully installed paramiko.[/green]") # Re-import after installing - import paramiko except Exception as e: - console.print(f"[bold red]Error:[/bold red] Failed to install paramiko: {e}") - console.print("[yellow]Please manually install required dependencies:[/yellow]") + console.print( + f"[bold red]Error:[/bold red] Failed to install paramiko: {e}" + ) + console.print( + "[yellow]Please manually install required dependencies:[/yellow]" + ) console.print("pip install paramiko>=2.7.2") return - - uri = getattr(args, 'uri', None) + + uri = getattr(args, "uri", None) if not uri: - console.print("[bold red]Error:[/bold red] No URI provided. Cannot start the application.") + console.print( + "[bold red]Error:[/bold red] No URI provided. Cannot start the application." 
+ ) return - + start_app(uri, args.app_name) + def add_commands(subparsers): """Add deploy commands to the CLI""" # Deploy command - deploy_parser = subparsers.add_parser('deploy', help='Deploy an application to a Synapse device') - deploy_parser.add_argument('app_dir', nargs='?', default='.', help='Path to the application directory') - deploy_parser.add_argument('--uri', '-u', help='Device IP address to deploy to', type=str) + deploy_parser = subparsers.add_parser( + "deploy", help="Deploy an application to a Synapse device" + ) + deploy_parser.add_argument( + "app_dir", nargs="?", default=".", help="Path to the application directory" + ) + deploy_parser.add_argument( + "--uri", "-u", help="Device IP address to deploy to", type=str + ) deploy_parser.set_defaults(func=deploy_cmd) - + # Start app command - start_app_parser = subparsers.add_parser('start-app', help='Start an application on a Synapse device') - start_app_parser.add_argument('app_name', help='Name of the application to start') - start_app_parser.add_argument('--uri', '-u', help='Device IP address to start the app on', type=str) - start_app_parser.set_defaults(func=start_app_cmd) \ No newline at end of file + start_app_parser = subparsers.add_parser( + "start-app", help="Start an application on a Synapse device" + ) + start_app_parser.add_argument("app_name", help="Name of the application to start") + start_app_parser.add_argument( + "--uri", "-u", help="Device IP address to start the app on", type=str + ) + start_app_parser.set_defaults(func=start_app_cmd) From eae0d461e943e2f093da57a5ff9c15aa22090cdd Mon Sep 17 00:00:00 2001 From: Emma Zhou Date: Wed, 7 May 2025 21:48:21 -0700 Subject: [PATCH 04/25] get uri from main --- .gitignore | 2 ++ synapse/cli/deploy.py | 9 ++------- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/.gitignore b/.gitignore index dbe68da1..5cb90610 100644 --- a/.gitignore +++ b/.gitignore @@ -184,3 +184,5 @@ output_*.json *.jsonl .scienv synapse_data* + 
+.synapse_deploy_cache.json diff --git a/synapse/cli/deploy.py b/synapse/cli/deploy.py index 81b6d186..b6909f94 100644 --- a/synapse/cli/deploy.py +++ b/synapse/cli/deploy.py @@ -885,7 +885,8 @@ def deploy_cmd(args): return # Deploy the package to the device - uri = getattr(args, "uri", None) + uri = args.uri + print(f"Deploying package to: {uri}") if uri: deploy_package(uri, deb_package) else: @@ -939,9 +940,6 @@ def add_commands(subparsers): deploy_parser.add_argument( "app_dir", nargs="?", default=".", help="Path to the application directory" ) - deploy_parser.add_argument( - "--uri", "-u", help="Device IP address to deploy to", type=str - ) deploy_parser.set_defaults(func=deploy_cmd) # Start app command @@ -949,7 +947,4 @@ def add_commands(subparsers): "start-app", help="Start an application on a Synapse device" ) start_app_parser.add_argument("app_name", help="Name of the application to start") - start_app_parser.add_argument( - "--uri", "-u", help="Device IP address to start the app on", type=str - ) start_app_parser.set_defaults(func=start_app_cmd) From 069743f7370ecf8e0cefb8f7fa6885c9efd516ef Mon Sep 17 00:00:00 2001 From: Emma Zhou Date: Thu, 8 May 2025 14:49:48 -0700 Subject: [PATCH 05/25] finish moving all build functionality over from example app repo --- synapse/cli/deploy.py | 247 ++++++++++---------------- synapse/templates/app/build_docker.sh | 23 --- 2 files changed, 95 insertions(+), 175 deletions(-) diff --git a/synapse/cli/deploy.py b/synapse/cli/deploy.py index b6909f94..88789443 100644 --- a/synapse/cli/deploy.py +++ b/synapse/cli/deploy.py @@ -49,62 +49,7 @@ def validate_manifest(manifest_path): return False -def replace_app_name_in_scripts(app_dir, app_name): - """Replace the app name in packaging scripts""" - package_script = os.path.join(app_dir, "ops", "package", "package.sh") - - # List of scripts that need app name replacement - script_paths = [ - os.path.join(app_dir, "ops", "package", "package.sh"), - os.path.join(app_dir, "ops", 
"package", "scripts", "postinstall.sh"), - os.path.join(app_dir, "ops", "package", "scripts", "preremove.sh"), - os.path.join(app_dir, "ops", "package", "scripts", "launch_synapse_app.sh"), - ] - - # Replace template variables in each script - for script_path in script_paths: - if os.path.exists(script_path): - with open(script_path, "r") as f: - content = f.read() - - # Replace the app name in the script - content = content.replace("{{APP_NAME}}", app_name) - content = content.replace( - 'SYNAPSE_APP_EXE="synapse-example-app"', f'SYNAPSE_APP_EXE="{app_name}"' - ) - - with open(script_path, "w") as f: - f.write(content) - - # Handle the systemd service file specially - it needs to be renamed - systemd_template = os.path.join( - app_dir, "ops", "package", "systemd", "{{APP_NAME}}.service" - ) - systemd_target = os.path.join( - app_dir, "ops", "package", "systemd", f"{app_name}.service" - ) - - if os.path.exists(systemd_template): - # Read template content - with open(systemd_template, "r") as f: - content = f.read() - - # Replace template variables - content = content.replace("{{APP_NAME}}", app_name) - - # Write to the new file - with open(systemd_target, "w") as f: - f.write(content) - - # Remove the template file if it's different from the target - if systemd_template != systemd_target: - try: - os.remove(systemd_template) - except: - pass - - -def package_app(app_dir): +def package_app(app_dir, app_name): """Package the application into a .deb file""" # Check if we're in a Docker container if os.path.exists("/.dockerenv"): @@ -148,71 +93,24 @@ def package_app(app_dir): return False else: # We're outside Docker, need to use docker to package - script_path = os.path.join(app_dir, "build_docker.sh") - - # Check if build_docker.sh exists in app_dir - if not os.path.exists(script_path): - # Use the one from synapse-python instead - script_dir = os.path.dirname(os.path.abspath(__file__)) - template_dir = os.path.join(script_dir, "..", "templates", "app") - script_path 
= os.path.join(template_dir, "build_docker.sh") - - if not os.path.exists(script_path): - console.print( - "[bold red]Error:[/bold red] Docker build script not found" - ) - return False - - # Copy the script to the app directory - shutil.copy(script_path, os.path.join(app_dir, "build_docker.sh")) - script_path = os.path.join(app_dir, "build_docker.sh") - - # Also check if the ops directory exists, if not, copy template files - ops_dir = os.path.join(app_dir, "ops") - if not os.path.exists(ops_dir) or not os.path.exists( - os.path.join(ops_dir, "package", "package.sh") - ): - # Create the ops directory structure - os.makedirs( - os.path.join(app_dir, "ops", "package", "scripts"), exist_ok=True - ) - os.makedirs( - os.path.join(app_dir, "ops", "package", "systemd"), exist_ok=True - ) - - # Copy template files from synapse-python - template_ops_dir = os.path.join(template_dir, "ops") - if os.path.exists(template_ops_dir): - # Copy package.sh - package_sh = os.path.join(template_ops_dir, "package", "package.sh") - if os.path.exists(package_sh): - shutil.copy( - package_sh, - os.path.join(app_dir, "ops", "package", "package.sh"), - ) - - # Copy scripts - scripts_dir = os.path.join(template_ops_dir, "package", "scripts") - if os.path.exists(scripts_dir): - for script in os.listdir(scripts_dir): - src = os.path.join(scripts_dir, script) - dst = os.path.join( - app_dir, "ops", "package", "scripts", script - ) - shutil.copy(src, dst) - - # Copy systemd files - systemd_dir = os.path.join(template_ops_dir, "package", "systemd") - if os.path.exists(systemd_dir): - for file in os.listdir(systemd_dir): - src = os.path.join(systemd_dir, file) - dst = os.path.join( - app_dir, "ops", "package", "systemd", file - ) - shutil.copy(src, dst) - + # Always use the build_docker.sh from the synapse-python package directly + script_dir = os.path.dirname(os.path.abspath(__file__)) + synapse_root = os.path.abspath(os.path.join(script_dir, "..", "..")) + build_docker_script = 
os.path.join(synapse_root, "synapse", "templates", "app", "build_docker.sh") + + if not os.path.exists(build_docker_script): + console.print( + f"[bold red]Error:[/bold red] Could not find Docker build script at {build_docker_script}" + ) + return False + # Make sure the script is executable - os.chmod(script_path, 0o755) + os.chmod(build_docker_script, 0o755) + + # Path to ops templates inside synapse-python + template_ops_dir = os.path.join( + synapse_root, "synapse", "templates", "app", "ops" + ) with Progress( SpinnerColumn(), @@ -227,7 +125,7 @@ def package_app(app_dir): try: # Use capture_output to prevent Docker output from interfering with progress bars result = subprocess.run( - ["bash", script_path], + ["bash", build_docker_script], check=True, cwd=app_dir, capture_output=True, @@ -253,33 +151,67 @@ def package_app(app_dir): "[yellow]Packaging application...", total=1 ) - # Now run package.sh in the Docker container + # Detect host architecture so we can use the correct image tag + arch = subprocess.check_output(["uname", "-m"]).decode("utf-8").strip() + if arch in ["arm64", "aarch64"]: + tag_suffix = "arm64" + else: + tag_suffix = "amd64" + + image = f"{os.path.basename(app_dir)}:latest-{tag_suffix}" + + # Compose a bash script that prepares the template files and then runs the + # packaging script. All placeholder replacements and SOURCE_DIR + # overrides happen entirely inside the container so that nothing ever + # gets written back to the application repository. 
+ + bash_cmd = f''' +set -e +APP_NAME="{app_name}" + +# Copy the template ops directory to a temporary working area inside the container +TEMPLATE_DIR="/synapse_ops" +TEMP_DIR="/tmp/synapse_package_ops" +rm -rf "$TEMP_DIR" +cp -r "$TEMPLATE_DIR" "$TEMP_DIR" + +# Replace placeholders in every file +find "$TEMP_DIR" -type f -exec sed -i 's/{{{{APP_NAME}}}}/'"$APP_NAME"'/g' {{}} + + +# Rename the systemd service template to the correct name +# The template file is literally called "{{APP_NAME}}.service" (double braces). +if [ -f "$TEMP_DIR/package/systemd/{{{{APP_NAME}}}}.service" ]; then + mv "$TEMP_DIR/package/systemd/{{{{APP_NAME}}}}.service" "$TEMP_DIR/package/systemd/$APP_NAME.service" +fi + +# Ensure the packaging script is executable and points to the correct source dir +chmod +x "$TEMP_DIR/package/package.sh" +sed -i 's|SOURCE_DIR=.*|SOURCE_DIR="/home/workspace"|' "$TEMP_DIR/package/package.sh" + +# Finally, run the packaging script from the workspace root so that the .deb lands +# in the application directory that is mounted from the host. 
+cd /home/workspace +bash "$TEMP_DIR/package/package.sh" +''' + + cmd = [ + "docker", + "run", + "-i", + "--rm", + "-v", + f"{os.path.abspath(app_dir)}:/home/workspace", + "-v", + f"{template_ops_dir}:/synapse_ops:ro", + image, + "/bin/bash", + "-c", + bash_cmd, + ] + + # Run the packaging script in Docker - capture output + print(f"Running packaging script in Docker: {image}") try: - # Detect architecture - arch = subprocess.check_output(["uname", "-m"]).decode("utf-8").strip() - if arch in ["arm64", "aarch64"]: - tag_suffix = "arm64" - else: - tag_suffix = "amd64" - - # Image name - image = f"{os.path.basename(app_dir)}:latest-{tag_suffix}" - - # Run the packaging script in Docker - capture output - print(f"Running packaging script in Docker: {image}") - cmd = [ - "docker", - "run", - "-it", - "--rm", - "-v", - f"{os.path.abspath(app_dir)}:/home/workspace", - image, - "/bin/bash", - "-c", - "cd /home/workspace && ./ops/package/package.sh", - ] - # Capture output to prevent it from interfering with progress bars result = subprocess.run( cmd, check=True, cwd=app_dir, capture_output=True, text=True @@ -715,7 +647,21 @@ def build_app(app_dir, app_name): console.print( f"[yellow]Docker image {image} not found, building it first...[/yellow]" ) - build_docker_script = os.path.join(app_dir, "build_docker.sh") + + # Always use the build_docker.sh from the synapse-python package directly + script_dir = os.path.dirname(os.path.abspath(__file__)) + synapse_root = os.path.abspath(os.path.join(script_dir, "..", "..")) + build_docker_script = os.path.join(synapse_root, "synapse", "templates", "app", "build_docker.sh") + + if not os.path.exists(build_docker_script): + console.print( + f"[bold red]Error:[/bold red] Could not find Docker build script at {build_docker_script}" + ) + return False + + # Make sure the script is executable + os.chmod(build_docker_script, 0o755) + try: # Run the build script without capturing output so user can see progress console.print("[blue]Running 
build_docker.sh...[/blue]") @@ -872,11 +818,8 @@ def deploy_cmd(args): console.print("[bold red]Error:[/bold red] Failed to build the application.") return - # Replace app name in packaging scripts - replace_app_name_in_scripts(app_dir, app_name) - # Package the app - if not package_app(app_dir): + if not package_app(app_dir, app_name): return # Find the generated .deb package diff --git a/synapse/templates/app/build_docker.sh b/synapse/templates/app/build_docker.sh index 55b18b45..082062fb 100755 --- a/synapse/templates/app/build_docker.sh +++ b/synapse/templates/app/build_docker.sh @@ -35,26 +35,3 @@ echo "Application name: $APP_NAME" docker build -t $SDK_IMAGE -f "${DOCKERFILE_PATH}" . echo "Successfully built $SDK_IMAGE" - -# Check if the binary exists, and if not, try to build it -if [ ! -f "build/${APP_NAME}" ] && [ ! -f "build-aarch64/${APP_NAME}" ] && [ ! -f "build-arm64/${APP_NAME}" ]; then - echo "Binary not found, attempting to build it in Docker..." - # Try to build the binary in Docker - docker run --rm \ - -v "$(pwd):/home/workspace" \ - $SDK_IMAGE \ - /bin/bash -c "cd /home/workspace && \ - mkdir -p build && \ - cd build && \ - cmake .. && \ - make -j$(nproc)" - - # Check if build succeeded - if [ -f "build/${APP_NAME}" ]; then - echo "Binary built successfully at build/${APP_NAME}" - else - echo "Warning: Could not automatically build binary. Make sure it's available before packaging." - fi -else - echo "Binary already exists, skipping build step." 
-fi \ No newline at end of file From bb7e3f60c63919f1fcbf1d28ddd1ec5bbd6fc148 Mon Sep 17 00:00:00 2001 From: Emma Zhou Date: Thu, 8 May 2025 14:59:34 -0700 Subject: [PATCH 06/25] small cleanup --- synapse/cli/deploy.py | 157 +++++++++++++++++++++--------------------- 1 file changed, 77 insertions(+), 80 deletions(-) diff --git a/synapse/cli/deploy.py b/synapse/cli/deploy.py index 88789443..f3ab1933 100644 --- a/synapse/cli/deploy.py +++ b/synapse/cli/deploy.py @@ -94,9 +94,7 @@ def package_app(app_dir, app_name): else: # We're outside Docker, need to use docker to package # Always use the build_docker.sh from the synapse-python package directly - script_dir = os.path.dirname(os.path.abspath(__file__)) - synapse_root = os.path.abspath(os.path.join(script_dir, "..", "..")) - build_docker_script = os.path.join(synapse_root, "synapse", "templates", "app", "build_docker.sh") + build_docker_script = get_build_docker_script() if not os.path.exists(build_docker_script): console.print( @@ -108,9 +106,7 @@ def package_app(app_dir, app_name): os.chmod(build_docker_script, 0o755) # Path to ops templates inside synapse-python - template_ops_dir = os.path.join( - synapse_root, "synapse", "templates", "app", "ops" - ) + template_ops_dir = get_template_ops_dir() with Progress( SpinnerColumn(), @@ -121,9 +117,10 @@ def package_app(app_dir, app_name): ) as progress: build_task = progress.add_task("[yellow]Building Docker image...", total=1) - # Build the Docker image - capture output to prevent interference with progress + # ------------------------------------------------------------------ + # STEP 1: Build the Docker image used for packaging the application + # ------------------------------------------------------------------ try: - # Use capture_output to prevent Docker output from interfering with progress bars result = subprocess.run( ["bash", build_docker_script], check=True, @@ -132,34 +129,26 @@ def package_app(app_dir, app_name): text=True, ) - # Only log errors if they 
occur, otherwise just update progress - if result.stderr and ( - "error" in result.stderr.lower() or "fail" in result.stderr.lower() - ): + # Log any warnings emitted by the build step so they are not lost + if result.stderr and any(word in result.stderr.lower() for word in ("error", "fail")): console.print(f"[bold red]Warning:[/bold red] {result.stderr}") + # Mark the *build* step as complete progress.update(build_task, advance=1) - except subprocess.CalledProcessError as e: - console.print( - f"[bold red]Error:[/bold red] Failed to build Docker image: {e}" - ) - if e.stderr: - console.print(f"[red]{e.stderr}[/red]") + except subprocess.CalledProcessError as exc: + console.print(f"[bold red]Error:[/bold red] Failed to build Docker image: {exc}") + if exc.stderr: + console.print(f"[red]{exc.stderr}[/red]") return False - package_task = progress.add_task( - "[yellow]Packaging application...", total=1 - ) - - # Detect host architecture so we can use the correct image tag - arch = subprocess.check_output(["uname", "-m"]).decode("utf-8").strip() - if arch in ["arm64", "aarch64"]: - tag_suffix = "arm64" - else: - tag_suffix = "amd64" + # ------------------------------------------------------------------ + # STEP 2: Package the application inside the freshly-built container + # ------------------------------------------------------------------ + package_task = progress.add_task("[yellow]Packaging application...", total=1) + tag_suffix = detect_arch() image = f"{os.path.basename(app_dir)}:latest-{tag_suffix}" - + # Compose a bash script that prepares the template files and then runs the # packaging script. 
All placeholder replacements and SOURCE_DIR # overrides happen entirely inside the container so that nothing ever @@ -624,11 +613,7 @@ def build_app(app_dir, app_name): console.print("[yellow]Binary not found, attempting to build...[/yellow]") # Detect architecture - arch = subprocess.check_output(["uname", "-m"]).decode("utf-8").strip() - if arch in ["arm64", "aarch64"]: - tag_suffix = "arm64" - else: - tag_suffix = "amd64" + tag_suffix = detect_arch() # Image name image = f"{os.path.basename(app_dir)}:latest-{tag_suffix}" @@ -648,10 +633,8 @@ def build_app(app_dir, app_name): f"[yellow]Docker image {image} not found, building it first...[/yellow]" ) - # Always use the build_docker.sh from the synapse-python package directly - script_dir = os.path.dirname(os.path.abspath(__file__)) - synapse_root = os.path.abspath(os.path.join(script_dir, "..", "..")) - build_docker_script = os.path.join(synapse_root, "synapse", "templates", "app", "build_docker.sh") + # Always use the shared build_docker.sh script + build_docker_script = get_build_docker_script() if not os.path.exists(build_docker_script): console.print( @@ -777,28 +760,11 @@ def build_app(app_dir, app_name): def deploy_cmd(args): """Handle the deploy command""" - # Check for required modules and install them if missing + # Ensure paramiko dependency is available try: - import paramiko - except ImportError: - console.print( - "[yellow]Required module 'paramiko' not found. 
Attempting to install...[/yellow]" - ) - try: - subprocess.check_call( - [sys.executable, "-m", "pip", "install", "paramiko>=2.7.2"] - ) - console.print("[green]Successfully installed paramiko.[/green]") - # Re-import after installing - except Exception as e: - console.print( - f"[bold red]Error:[/bold red] Failed to install paramiko: {e}" - ) - console.print( - "[yellow]Please manually install required dependencies:[/yellow]" - ) - console.print("pip install paramiko>=2.7.2") - return + ensure_paramiko() + except Exception: + return # Get absolute path of app directory app_dir = os.path.abspath(args.app_dir) @@ -841,28 +807,11 @@ def deploy_cmd(args): def start_app_cmd(args): """Handle the start-app command""" - # Check for required modules and install them if missing + # Ensure paramiko dependency is available try: - import paramiko - except ImportError: - console.print( - "[yellow]Required module 'paramiko' not found. Attempting to install...[/yellow]" - ) - try: - subprocess.check_call( - [sys.executable, "-m", "pip", "install", "paramiko>=2.7.2"] - ) - console.print("[green]Successfully installed paramiko.[/green]") - # Re-import after installing - except Exception as e: - console.print( - f"[bold red]Error:[/bold red] Failed to install paramiko: {e}" - ) - console.print( - "[yellow]Please manually install required dependencies:[/yellow]" - ) - console.print("pip install paramiko>=2.7.2") - return + ensure_paramiko() + except Exception: + return uri = getattr(args, "uri", None) if not uri: @@ -891,3 +840,51 @@ def add_commands(subparsers): ) start_app_parser.add_argument("app_name", help="Name of the application to start") start_app_parser.set_defaults(func=start_app_cmd) + +# --------------------------------------------------------------------------- +# Helper utilities shared across this module +# --------------------------------------------------------------------------- + +def ensure_paramiko(): + """Import paramiko, installing it on the fly if it is 
missing. + + This logic was previously duplicated in multiple command handlers. The + helper makes the intent explicit and keeps the main code paths concise. + """ + try: + import importlib # noqa: WPS433 – used intentionally for runtime import + importlib.import_module("paramiko") + except ImportError: + console.print( + "[yellow]Required module 'paramiko' not found. Attempting to install...[/yellow]" + ) + try: + subprocess.check_call([sys.executable, "-m", "pip", "install", "paramiko>=2.7.2"]) + console.print("[green]Successfully installed paramiko.[/green]") + except Exception as exc: # pragma: no cover – best-effort installation + console.print(f"[bold red]Error:[/bold red] Failed to install paramiko: {exc}") + console.print("[yellow]Please manually install required dependencies:[/yellow]") + console.print("pip install paramiko>=2.7.2") + raise + + +def get_synapse_root() -> str: + """Return the absolute path to the *synapse-python* repository root.""" + script_dir = os.path.dirname(os.path.abspath(__file__)) + return os.path.abspath(os.path.join(script_dir, "..", "..")) + + +def get_build_docker_script() -> str: + """Return the canonical *build_docker.sh* path used throughout the tool.""" + return os.path.join(get_synapse_root(), "synapse", "templates", "app", "build_docker.sh") + + +def get_template_ops_dir() -> str: + """Return the path to the shared *ops* template directory.""" + return os.path.join(get_synapse_root(), "synapse", "templates", "app", "ops") + + +def detect_arch() -> str: + """Return an architecture tag suffix (``arm64`` or ``amd64``).""" + arch = subprocess.check_output(["uname", "-m"]).decode("utf-8").strip() + return "arm64" if arch in ("arm64", "aarch64") else "amd64" From 55362356490f2b729363cff7a1836cbb0c89cc47 Mon Sep 17 00:00:00 2001 From: Emma Zhou Date: Thu, 8 May 2025 15:27:22 -0700 Subject: [PATCH 07/25] add guards to ensure docker is installed and running --- synapse/cli/deploy.py | 35 +++++++++++++++++++++++++++++++++++ 1 file 
changed, 35 insertions(+) diff --git a/synapse/cli/deploy.py b/synapse/cli/deploy.py index f3ab1933..cafa39a9 100644 --- a/synapse/cli/deploy.py +++ b/synapse/cli/deploy.py @@ -760,6 +760,10 @@ def build_app(app_dir, app_name): def deploy_cmd(args): """Handle the deploy command""" + # Ensure Docker is available and running + if not ensure_docker(): + return + # Ensure paramiko dependency is available try: ensure_paramiko() @@ -888,3 +892,34 @@ def detect_arch() -> str: """Return an architecture tag suffix (``arm64`` or ``amd64``).""" arch = subprocess.check_output(["uname", "-m"]).decode("utf-8").strip() return "arm64" if arch in ("arm64", "aarch64") else "amd64" + + +# --------------------------------------------------------------------------- +# Environment sanity-check helpers +# --------------------------------------------------------------------------- + +def ensure_docker() -> bool: + """Return True if the *docker* CLI is available and the daemon responds. + + Prints a clear, user-friendly message and returns ``False`` otherwise so the + caller can abort early. + """ + if shutil.which("docker") is None: + console.print( + "[bold red]Error:[/bold red] Docker CLI not found. Please install Docker before running this command." + ) + return False + + try: + subprocess.run( + ["docker", "info"], + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + check=True, + ) + return True + except subprocess.CalledProcessError: + console.print( + "[bold red]Error:[/bold red] Docker daemon does not appear to be running. Please start Docker and try again." 
+ ) + return False From 94493d079f476d6ef6120841ac7c39f607829612 Mon Sep 17 00:00:00 2001 From: Emma Zhou Date: Thu, 8 May 2025 15:40:20 -0700 Subject: [PATCH 08/25] delete some convenience scripts that aren't strictly necessary anymore --- synapse/templates/app/deploy/deploy.py | 253 ----------------- synapse/templates/app/deploy/requirements.txt | 2 - synapse/templates/app/package.sh | 37 --- synapse/templates/app/setup_app.py | 261 ------------------ synapse/templates/app/start_docker.sh | 40 --- 5 files changed, 593 deletions(-) delete mode 100644 synapse/templates/app/deploy/deploy.py delete mode 100644 synapse/templates/app/deploy/requirements.txt delete mode 100644 synapse/templates/app/package.sh delete mode 100644 synapse/templates/app/setup_app.py delete mode 100644 synapse/templates/app/start_docker.sh diff --git a/synapse/templates/app/deploy/deploy.py b/synapse/templates/app/deploy/deploy.py deleted file mode 100644 index 92fef6b0..00000000 --- a/synapse/templates/app/deploy/deploy.py +++ /dev/null @@ -1,253 +0,0 @@ -#!/usr/bin/env python3 - -import os -import sys -import getpass -import paramiko -import json -import base64 -import time -from pathlib import Path -from rich.console import Console -from rich.panel import Panel -from rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn, TimeElapsedColumn -from rich.prompt import Prompt, Confirm -from rich.text import Text -from rich import box - -# Initialize Rich console -console = Console() - -# Configuration -CACHE_FILE = ".synapse_deploy_cache.json" - -def get_credentials(): - """Prompt for device credentials with rich formatting""" - console.print("[bold yellow]Device Connection Details[/bold yellow]") - ip_address = Prompt.ask("Enter SciFi device IP address") - username = Prompt.ask("Enter login username", default="scifi") - login_password = getpass.getpass("Enter login password: ") - root_password = getpass.getpass("Enter root password for package installation: ") - return 
ip_address, username, login_password, root_password - -def load_cached_credentials(): - """Load cached credentials if they exist""" - try: - if os.path.exists(CACHE_FILE): - with console.status("[bold blue]Loading cached credentials...[/bold blue]"): - with open(CACHE_FILE, 'r') as f: - data = json.load(f) - ip_address = data.get('ip_address') - username = data.get('username', 'admin') - encoded_login_password = data.get('encoded_login_password') - encoded_root_password = data.get('encoded_root_password') - - if encoded_login_password and encoded_root_password: - login_password = base64.b64decode(encoded_login_password).decode('utf-8') - root_password = base64.b64decode(encoded_root_password).decode('utf-8') - console.print(f"[green]Using cached credentials for [bold]{username}@{ip_address}[/bold][/green]") - return ip_address, username, login_password, root_password - except Exception as e: - console.print(f"[yellow]Warning: Failed to load cached credentials: {e}[/yellow]") - return None, None, None, None - -def save_credentials(ip_address, username, login_password, root_password): - """Save credentials to cache file""" - try: - with console.status("[bold blue]Saving credentials...[/bold blue]"): - with open(CACHE_FILE, 'w') as f: - data = { - 'ip_address': ip_address, - 'username': username, - 'encoded_login_password': base64.b64encode(login_password.encode('utf-8')).decode('utf-8'), - 'encoded_root_password': base64.b64encode(root_password.encode('utf-8')).decode('utf-8') - } - json.dump(data, f) - os.chmod(CACHE_FILE, 0o600) # Restrict file permissions - console.print("[green]Credentials saved successfully[/green]") - except Exception as e: - console.print(f"[yellow]Warning: Failed to save credentials: {e}[/yellow]") - -def deploy_package(ip_address, username, login_password, root_password, deb_package): - """Deploy and install the deb package to the SciFi device""" - package_filename = os.path.basename(deb_package) - - with Progress( - SpinnerColumn(), - 
TextColumn("[bold blue]{task.description}[/bold blue]"), - BarColumn(), - TimeElapsedColumn(), - console=console - ) as progress: - # Setup overall task - overall_task = progress.add_task("[yellow]Overall deployment progress...", total=4) - - # Connect to device - connect_task = progress.add_task(f"[green]Connecting as {username}@{ip_address}...", total=1) - try: - # Create SSH client - client = paramiko.SSHClient() - client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - - # Connect to the device - client.connect(ip_address, username=username, password=login_password, timeout=10) - progress.update(connect_task, advance=1) - progress.update(overall_task, advance=1) - - # Create SFTP task - transfer_task = progress.add_task(f"[cyan]Transferring {package_filename}...", total=1) - - # Create SFTP client - sftp = client.open_sftp() - remote_path = f"/tmp/{package_filename}" - - # Upload the package - sftp.put(deb_package, remote_path) - progress.update(transfer_task, advance=1) - progress.update(overall_task, advance=1) - - # Install task - install_task = progress.add_task("[magenta]Installing package with root privileges...", total=1) - - # Use expect-like behavior with Paramiko to handle su - # First, we create an interactive shell session - shell = client.invoke_shell() - - # Set up a way to collect output - output = "" - - # Send su command - shell.send("su -\n") - time.sleep(1) # Wait for password prompt - - # Send root password - shell.send(f"{root_password}\n") - time.sleep(1) # Wait for su to authenticate - - # Send dpkg command - shell.send(f"dpkg -i {remote_path}\n") - time.sleep(3) # Give dpkg time to run - - # Exit from root shell - shell.send("exit\n") - time.sleep(0.5) - - # Collect the final output - while shell.recv_ready(): - chunk = shell.recv(4096).decode('utf-8') - output += chunk - - # Check for common error indicators - if "error" in output.lower() or "failed" in output.lower(): - progress.update(install_task, completed=1, visible=False) - 
progress.update(overall_task, visible=False) - console.print(Panel( - f"[bold red]Installation Error[/bold red]\n\n{output}", - title="Deployment Failed", - border_style="red", - box=box.DOUBLE - )) - return False - - # Complete the tasks - progress.update(install_task, advance=1) - progress.update(overall_task, advance=1) - - # Cleanup task - cleanup_task = progress.add_task("[blue]Cleaning up...", total=1) - shell.send(f"rm {remote_path}\n") - time.sleep(0.5) - progress.update(cleanup_task, advance=1) - progress.update(overall_task, advance=1) - - console.print(Panel( - f"[bold green]Successfully deployed[/bold green] [yellow]{package_filename}[/yellow] [bold green]to[/bold green] [blue]{ip_address}[/blue]", - title="Deployment Successful", - border_style="green", - box=box.DOUBLE - )) - return True - - except Exception as e: - progress.update(overall_task, visible=False) - console.print(Panel( - f"[bold red]Connection Error[/bold red]\n\n{str(e)}", - title="Deployment Failed", - border_style="red", - box=box.DOUBLE - )) - return False - finally: - try: - if 'shell' in locals(): - shell.close() - if 'sftp' in locals(): - sftp.close() - if 'client' in locals(): - client.close() - except: - pass - -def main(): - # Print welcome banner - console.print(Panel( - "[bold]Synapse App Deployment Tool[/bold]", - border_style="blue", - box=box.ROUNDED - )) - - # Check if a .deb package was provided - if len(sys.argv) < 2: - console.print("[bold red]Error:[/bold red] No .deb package specified.") - console.print(f"Usage: {sys.argv[0]} path/to/package.deb") - sys.exit(1) - - deb_package = sys.argv[1] - - # Check if the IP address was provided as a second argument - ip_address = None - if len(sys.argv) > 2: - ip_address = sys.argv[2] - - # Check if the .deb package exists - if not os.path.isfile(deb_package): - console.print(f"[bold red]Error:[/bold red] The specified .deb package does not exist: [yellow]{deb_package}[/yellow]") - sys.exit(1) - - # Show package info - 
package_size = os.path.getsize(deb_package) / (1024 * 1024) # Size in MB - console.print(f"[bold cyan]Package:[/bold cyan] [yellow]{os.path.basename(deb_package)}[/yellow] ([cyan]{package_size:.2f} MB[/cyan])") - - # Load cached credentials or use provided IP - username = None - login_password = None - root_password = None - - if ip_address is None: - ip_address, username, login_password, root_password = load_cached_credentials() - - # If no cached credentials or IP was provided but no credentials, prompt for them - if not ip_address or not username or not login_password or not root_password: - if ip_address: - console.print(f"[bold]Using target device:[/bold] [yellow]{ip_address}[/yellow]") - username = Prompt.ask("Enter login username", default="scifi") - login_password = getpass.getpass("Enter login password: ") - root_password = getpass.getpass("Enter root password for package installation: ") - else: - ip_address, username, login_password, root_password = get_credentials() - - # Try to deploy until successful - while True: - if deploy_package(ip_address, username, login_password, root_password, deb_package): - # Save successful credentials - save_credentials(ip_address, username, login_password, root_password) - break - else: - if Confirm.ask("[yellow]Would you like to retry with different credentials?[/yellow]"): - ip_address, username, login_password, root_password = get_credentials() - else: - console.print("[bold red]Deployment aborted by user.[/bold red]") - sys.exit(1) - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/synapse/templates/app/deploy/requirements.txt b/synapse/templates/app/deploy/requirements.txt deleted file mode 100644 index ed2e064b..00000000 --- a/synapse/templates/app/deploy/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -paramiko>=2.7.2 -rich>=10.0.0 \ No newline at end of file diff --git a/synapse/templates/app/package.sh b/synapse/templates/app/package.sh deleted file mode 100644 index 
a61bc8b0..00000000 --- a/synapse/templates/app/package.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash -set -e - -# Detect architecture -ARCH=$(uname -m) -if [[ "${ARCH}" == "arm64" || "${ARCH}" == "aarch64" ]]; then - TAG_SUFFIX="arm64" -else - TAG_SUFFIX="amd64" -fi - -# Get the app name from the directory name or manifest.json -APP_NAME=$(basename "$(pwd)") -if [ -f "manifest.json" ]; then - # Try to extract the name from manifest.json - if command -v jq &> /dev/null; then - MANIFEST_NAME=$(jq -r '.name' manifest.json 2>/dev/null) - if [ -n "$MANIFEST_NAME" ] && [ "$MANIFEST_NAME" != "null" ]; then - APP_NAME=$MANIFEST_NAME - fi - fi -fi - -# Image name -IMAGE="${APP_NAME}:latest-${TAG_SUFFIX}" - -# Check if image exists -if ! docker image inspect $IMAGE >/dev/null 2>&1; then - echo "Image $IMAGE not found. Please run build_docker.sh first." - exit 1 -fi - -docker run -it \ - --rm \ - -v "$(pwd):/home/workspace" \ - $IMAGE \ - /bin/bash -c "cd /home/workspace && ./ops/package/package.sh" \ No newline at end of file diff --git a/synapse/templates/app/setup_app.py b/synapse/templates/app/setup_app.py deleted file mode 100644 index 302a80aa..00000000 --- a/synapse/templates/app/setup_app.py +++ /dev/null @@ -1,261 +0,0 @@ -#!/usr/bin/env python3 - -import os -import shutil -import argparse -import json -from pathlib import Path - -def setup_app_structure(app_dir, app_name): - """Setup the basic application structure""" - print(f"Setting up app structure for {app_name} in {app_dir}") - - # Create app directory if it doesn't exist - if not os.path.exists(app_dir): - os.makedirs(app_dir) - - # Get the template directory - script_dir = os.path.dirname(os.path.abspath(__file__)) - template_dir = script_dir - - # Copy necessary scripts - scripts = [ - 'build_docker.sh', - 'start_docker.sh', - 'package.sh' - ] - - for script in scripts: - source = os.path.join(template_dir, script) - destination = os.path.join(app_dir, script) - - if os.path.exists(source): - 
shutil.copy(source, destination) - # Make the script executable - os.chmod(destination, 0o755) - print(f"Copied {script} to {destination}") - - # Create ops directory structure - ops_dir = os.path.join(app_dir, 'ops') - os.makedirs(os.path.join(ops_dir, 'docker'), exist_ok=True) - os.makedirs(os.path.join(ops_dir, 'package'), exist_ok=True) - os.makedirs(os.path.join(ops_dir, 'package', 'scripts'), exist_ok=True) - os.makedirs(os.path.join(ops_dir, 'package', 'systemd'), exist_ok=True) - - # Create deploy directory and copy deploy script - deploy_dir = os.path.join(app_dir, 'deploy') - os.makedirs(deploy_dir, exist_ok=True) - - # Copy deploy scripts - deploy_source = os.path.join(template_dir, 'deploy') - for item in os.listdir(deploy_source): - source_item = os.path.join(deploy_source, item) - dest_item = os.path.join(deploy_dir, item) - - if os.path.isfile(source_item): - shutil.copy(source_item, dest_item) - # Make scripts executable - if item.endswith('.py') or item.endswith('.sh'): - os.chmod(dest_item, 0o755) - print(f"Copied {item} to {dest_item}") - - # Create basic package script - package_script = os.path.join(ops_dir, 'package', 'package.sh') - with open(package_script, 'w') as f: - f.write(f'''#!/bin/bash - -SYNAPSE_APP_VERSION="0.1.0" -SYNAPSE_APP_EXE="{app_name}" - -SCRIPT_DIR=$(dirname "$0") -SOURCE_DIR="${{SCRIPT_DIR}}/../../" -BUILD_DIR="${{SOURCE_DIR}}/build-aarch64/" - -STAGING_DIR="/tmp/synapse-package" -mkdir -p ${{STAGING_DIR}} - -# Binary install and setup -# TODO: Decide if there is a better place to put this -mkdir -p ${{STAGING_DIR}}/opt/scifi/bin -cp "${{BUILD_DIR}}/${{SYNAPSE_APP_EXE}}" "${{STAGING_DIR}}/opt/scifi/bin/" - -# Launch script -mkdir -p ${{STAGING_DIR}}/opt/scifi/scripts -cp "${{SCRIPT_DIR}}/scripts/launch_app.sh" "${{STAGING_DIR}}/opt/scifi/scripts/" - -# Systemd service install and setup -mkdir -p ${{STAGING_DIR}}/etc/systemd/system -cp "${{SCRIPT_DIR}}/systemd/{app_name}.service" "${{STAGING_DIR}}/etc/systemd/system/" - 
-fpm -s dir -t deb \\ - -n "${{SYNAPSE_APP_EXE}}" \\ - -f \\ - -v "${{SYNAPSE_APP_VERSION}}" \\ - -C ${{STAGING_DIR}} \\ - --deb-no-default-config-files \\ - --depends "systemd" \\ - --vendor "Science Corporation" \\ - --description "Synapse Application" \\ - --architecture arm64 \\ - --after-install "${{SCRIPT_DIR}}/scripts/postinstall.sh" \\ - --before-remove "${{SCRIPT_DIR}}/scripts/preremove.sh" \\ - --after-remove "${{SCRIPT_DIR}}/scripts/postremove.sh" \\ - . -''') - os.chmod(package_script, 0o755) - - # Create basic systemd service file - service_file = os.path.join(ops_dir, 'package', 'systemd', f'{app_name}.service') - with open(service_file, 'w') as f: - f.write(f'''[Unit] -Description={app_name} service -After=network.target - -[Service] -ExecStart=/opt/scifi/scripts/launch_app.sh -Restart=on-failure -RestartSec=5 - -[Install] -WantedBy=multi-user.target -''') - - # Create launch script - launch_script = os.path.join(ops_dir, 'package', 'scripts', 'launch_app.sh') - with open(launch_script, 'w') as f: - f.write(f'''#!/bin/bash -set -e - -# Launch the application -/opt/scifi/bin/{app_name} -''') - os.chmod(launch_script, 0o755) - - # Create post-install script - postinstall_script = os.path.join(ops_dir, 'package', 'scripts', 'postinstall.sh') - with open(postinstall_script, 'w') as f: - f.write(f'''#!/bin/bash -set -e - -# Enable and start the service -systemctl daemon-reload -systemctl enable {app_name}.service -systemctl start {app_name}.service - -echo "{app_name} installed and started successfully" -''') - os.chmod(postinstall_script, 0o755) - - # Create pre-remove script - preremove_script = os.path.join(ops_dir, 'package', 'scripts', 'preremove.sh') - with open(preremove_script, 'w') as f: - f.write(f'''#!/bin/bash -set -e - -# Stop and disable the service -systemctl stop {app_name}.service -systemctl disable {app_name}.service - -echo "Stopped {app_name} service" -''') - os.chmod(preremove_script, 0o755) - - # Create post-remove script - 
postremove_script = os.path.join(ops_dir, 'package', 'scripts', 'postremove.sh') - with open(postremove_script, 'w') as f: - f.write(f'''#!/bin/bash -set -e - -# Reload systemd to remove the service -systemctl daemon-reload - -echo "Removed {app_name} service" -''') - os.chmod(postremove_script, 0o755) - - # Create basic Dockerfiles - dockerfile = os.path.join(ops_dir, 'docker', 'Dockerfile') - with open(dockerfile, 'w') as f: - f.write('''FROM ubuntu:22.04 - -ARG DEBIAN_FRONTEND=noninteractive - -# Install base dependencies -RUN apt-get update && apt-get install -y \\ - build-essential \\ - cmake \\ - pkg-config \\ - git \\ - ruby-dev \\ - curl \\ - jq \\ - && gem install fpm - -# Add a non-root user -RUN useradd -ms /bin/bash developer -USER developer -WORKDIR /home/workspace - -CMD ["/bin/bash"] -''') - - dockerfile_arm64 = os.path.join(ops_dir, 'docker', 'Dockerfile.arm64') - with open(dockerfile_arm64, 'w') as f: - f.write('''FROM ubuntu:22.04 - -ARG DEBIAN_FRONTEND=noninteractive - -# Install base dependencies -RUN apt-get update && apt-get install -y \\ - build-essential \\ - cmake \\ - pkg-config \\ - git \\ - ruby-dev \\ - curl \\ - jq \\ - && gem install fpm - -# Add a non-root user -RUN useradd -ms /bin/bash developer -USER developer -WORKDIR /home/workspace - -CMD ["/bin/bash"] -''') - - # Create manifest.json if it doesn't exist - manifest_path = os.path.join(app_dir, 'manifest.json') - if not os.path.exists(manifest_path): - manifest = { - "name": app_name, - "device_configuration": { - "nodes": [ - { - "type": "kApplicationNode", - "id": 2, - "application": { - "name": app_name - } - } - ] - } - } - - with open(manifest_path, 'w') as f: - json.dump(manifest, f, indent=2) - - print(f"Created basic manifest.json for {app_name}") - -def main(): - parser = argparse.ArgumentParser(description="Setup a new Synapse application structure") - parser.add_argument("app_name", help="Name of the application") - parser.add_argument("--app_dir", 
default=os.getcwd(), help="Directory to create the application in (default: current dir)") - - args = parser.parse_args() - - setup_app_structure(args.app_dir, args.app_name) - print(f"Application {args.app_name} setup complete!") - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/synapse/templates/app/start_docker.sh b/synapse/templates/app/start_docker.sh deleted file mode 100644 index 34a82619..00000000 --- a/synapse/templates/app/start_docker.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -set -e - -# Detect architecture -ARCH=$(uname -m) -if [[ "${ARCH}" == "arm64" || "${ARCH}" == "aarch64" ]]; then - TAG_SUFFIX="arm64" -else - TAG_SUFFIX="amd64" -fi - -# Get the app name from the directory name or manifest.json -APP_NAME=$(basename "$(pwd)") -if [ -f "manifest.json" ]; then - # Try to extract the name from manifest.json - if command -v jq &> /dev/null; then - MANIFEST_NAME=$(jq -r '.name' manifest.json 2>/dev/null) - if [ -n "$MANIFEST_NAME" ] && [ "$MANIFEST_NAME" != "null" ]; then - APP_NAME=$MANIFEST_NAME - fi - fi -fi - -# Image name -IMAGE="${APP_NAME}:latest-${TAG_SUFFIX}" - -# Check if image exists -if ! docker image inspect $IMAGE >/dev/null 2>&1; then - echo "Image $IMAGE not found. Please run build_docker.sh first." 
- exit 1 -fi - -echo "Starting container for architecture: $ARCH" - -# Run the container with appropriate mounts -# Adjust volume mappings as needed for your project -docker run -it \ - --rm \ - -v "$(pwd):/home/workspace" \ - $IMAGE \ No newline at end of file From a51e855f5fc517e60cf07b0691d30fbbc3f31922 Mon Sep 17 00:00:00 2001 From: Emma Zhou Date: Thu, 8 May 2025 15:49:53 -0700 Subject: [PATCH 09/25] remove paramiko checks, since synapsectl comes with it --- synapse/cli/deploy.py | 36 ------------------------------------ 1 file changed, 36 deletions(-) diff --git a/synapse/cli/deploy.py b/synapse/cli/deploy.py index cafa39a9..3d4f357a 100644 --- a/synapse/cli/deploy.py +++ b/synapse/cli/deploy.py @@ -1,5 +1,4 @@ import os -import sys import subprocess import shutil import json @@ -764,12 +763,6 @@ def deploy_cmd(args): if not ensure_docker(): return - # Ensure paramiko dependency is available - try: - ensure_paramiko() - except Exception: - return - # Get absolute path of app directory app_dir = os.path.abspath(args.app_dir) @@ -811,12 +804,6 @@ def deploy_cmd(args): def start_app_cmd(args): """Handle the start-app command""" - # Ensure paramiko dependency is available - try: - ensure_paramiko() - except Exception: - return - uri = getattr(args, "uri", None) if not uri: console.print( @@ -849,29 +836,6 @@ def add_commands(subparsers): # Helper utilities shared across this module # --------------------------------------------------------------------------- -def ensure_paramiko(): - """Import paramiko, installing it on the fly if it is missing. - - This logic was previously duplicated in multiple command handlers. The - helper makes the intent explicit and keeps the main code paths concise. - """ - try: - import importlib # noqa: WPS433 – used intentionally for runtime import - importlib.import_module("paramiko") - except ImportError: - console.print( - "[yellow]Required module 'paramiko' not found. 
Attempting to install...[/yellow]" - ) - try: - subprocess.check_call([sys.executable, "-m", "pip", "install", "paramiko>=2.7.2"]) - console.print("[green]Successfully installed paramiko.[/green]") - except Exception as exc: # pragma: no cover – best-effort installation - console.print(f"[bold red]Error:[/bold red] Failed to install paramiko: {exc}") - console.print("[yellow]Please manually install required dependencies:[/yellow]") - console.print("pip install paramiko>=2.7.2") - raise - - def get_synapse_root() -> str: """Return the absolute path to the *synapse-python* repository root.""" script_dir = os.path.dirname(os.path.abspath(__file__)) From d298ece73e3fd70292eb58bb74080b8fcc4ae8b9 Mon Sep 17 00:00:00 2001 From: Emma Zhou Date: Thu, 8 May 2025 16:38:13 -0700 Subject: [PATCH 10/25] remove start_app stuff --- synapse/cli/deploy.py | 121 ------------------------------------------ 1 file changed, 121 deletions(-) diff --git a/synapse/cli/deploy.py b/synapse/cli/deploy.py index 3d4f357a..857dbdb9 100644 --- a/synapse/cli/deploy.py +++ b/synapse/cli/deploy.py @@ -489,108 +489,6 @@ def save_credentials(ip_address, username, login_password, root_password): console.print(f"[yellow]Warning: Failed to save credentials: {e}[/yellow]") -def start_app(ip_address, app_name): - """Start the application on the device""" - # Stop any previous progress display - console.clear_live() - - # Get cached credentials or prompt for new ones - cached_ip, username, login_password, root_password = load_cached_credentials() - - # If no cached credentials or they don't match our target IP, prompt for new ones - if ( - not cached_ip - or cached_ip != ip_address - or not username - or not login_password - or not root_password - ): - username, login_password, root_password = get_device_credentials(ip_address) - - with Progress( - SpinnerColumn(), - TextColumn("[bold blue]{task.description}[/bold blue]"), - console=console, - ) as progress: - task = progress.add_task( - f"[yellow]Starting 
{app_name} on {ip_address}...", total=1 - ) - - try: - # Start the app using paramiko - import paramiko - - client = None - shell = None - - try: - # Create SSH client - client = paramiko.SSHClient() - client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - - # Connect to the device - client.connect( - ip_address, username=username, password=login_password, timeout=10 - ) - - # Use interactive shell to run commands with sudo - shell = client.invoke_shell() - - # Send the command to start the application - shell.send(f"sudo systemctl start {app_name}.service\n") - time.sleep(2) # Wait for the command to execute - - # Collect output to check for errors - output = "" - while shell.recv_ready(): - chunk = shell.recv(4096).decode("utf-8") - output += chunk - - # If there's an error, we'll usually see it in the output - if "error" in output.lower() or "failed" in output.lower(): - progress.update(task, visible=False) - console.print( - f"[bold red]Error:[/bold red] Failed to start application:\n{output}" - ) - return False - - # Save successful credentials - save_credentials(ip_address, username, login_password, root_password) - - progress.update(task, advance=1) - - console.print( - Panel( - f"[bold green]Successfully started[/bold green] [yellow]{app_name}[/yellow] [bold green]on[/bold green] [blue]{ip_address}[/blue]", - title="Application Started", - border_style="green", - box=box.DOUBLE, - ) - ) - return True - - except Exception as e: - progress.update(task, visible=False) - console.print(f"[bold red]Error:[/bold red] {str(e)}") - return False - - except Exception as e: - progress.update(task, visible=False) - console.print( - f"[bold red]Error:[/bold red] Failed to start application: {e}" - ) - return False - finally: - # Clean up connections - try: - if shell: - shell.close() - if client: - client.close() - except: - pass - - def build_app(app_dir, app_name): """Build the application binary before packaging""" console.print(f"[yellow]Building application: 
{app_name}...[/yellow]") @@ -802,18 +700,6 @@ def deploy_cmd(args): console.print(f"[green]Package available at:[/green] {deb_package}") -def start_app_cmd(args): - """Handle the start-app command""" - uri = getattr(args, "uri", None) - if not uri: - console.print( - "[bold red]Error:[/bold red] No URI provided. Cannot start the application." - ) - return - - start_app(uri, args.app_name) - - def add_commands(subparsers): """Add deploy commands to the CLI""" # Deploy command @@ -825,13 +711,6 @@ def add_commands(subparsers): ) deploy_parser.set_defaults(func=deploy_cmd) - # Start app command - start_app_parser = subparsers.add_parser( - "start-app", help="Start an application on a Synapse device" - ) - start_app_parser.add_argument("app_name", help="Name of the application to start") - start_app_parser.set_defaults(func=start_app_cmd) - # --------------------------------------------------------------------------- # Helper utilities shared across this module # --------------------------------------------------------------------------- From 9010d62ecd7907675c13c71dedecb8b5cb050c9e Mon Sep 17 00:00:00 2001 From: Emma Zhou Date: Thu, 8 May 2025 18:36:02 -0700 Subject: [PATCH 11/25] add manifest to build --- synapse/cli/rpc.py | 49 +++++++++++++++++--- synapse/templates/app/ops/package/package.sh | 13 ++++++ 2 files changed, 56 insertions(+), 6 deletions(-) diff --git a/synapse/cli/rpc.py b/synapse/cli/rpc.py index f955c5dc..6453fe9a 100644 --- a/synapse/cli/rpc.py +++ b/synapse/cli/rpc.py @@ -29,10 +29,22 @@ def add_commands(subparsers): b.set_defaults(func=query) - c = subparsers.add_parser("start", help="Start the device") + c = subparsers.add_parser("start", help="Start the device or an application") + c.add_argument( + "app_name", + nargs="?", + default=None, + help="Name of the application as specified in its manifest.json. 
If provided, the tool will attempt to locate the manifest, configure the device using its 'device_configuration', and then start the device.", + ) c.set_defaults(func=start) - d = subparsers.add_parser("stop", help="Stop the device") + d = subparsers.add_parser("stop", help="Stop the device or an application") + d.add_argument( + "app_name", + nargs="?", + default=None, + help="Name of the application to stop (systemd service). If omitted, stops the whole device via RPC.", + ) d.set_defaults(func=stop) e = subparsers.add_parser("configure", help="Write a configuration to the device") @@ -137,20 +149,45 @@ def load_query_request(path_to_config): def start(args): + """Start the Synapse device (and any application services managed by + *ApplicationControllerNode*). If an ``app_name`` is supplied we still just + issue the standard *Device.start* RPC – the controller node on-device will + decide which systemd service to launch. + """ + console = Console() + + if getattr(args, "app_name", None): + console.print( + f"[yellow]Starting device; application '{args.app_name}' will be " + "started by the on-device ApplicationController.[/yellow]" + ) + with console.status("Starting device...", spinner="bouncingBall"): - stop_ret = syn.Device(args.uri, args.verbose).start_with_status() - if not stop_ret: + start_ret = syn.Device(args.uri, args.verbose).start_with_status() + if start_ret is None: console.print("[bold red]Internal error starting device") return - if stop_ret.code != StatusCode.kOk: - console.print(f"[bold red]Error starting\n{stop_ret.message}") + if start_ret.code != StatusCode.kOk: + console.print(f"[bold red]Error starting device[/bold red]\n{start_ret.message}") return + console.print("[green]Device Started") def stop(args): + """Stop the Synapse device and, by extension, any application services + controlled by ApplicationControllerNode. 
+ """ + console = Console() + + if getattr(args, "app_name", None): + console.print( + f"[yellow]Stopping device; application '{args.app_name}' will be " + "shut down by the on-device ApplicationController.[/yellow]" + ) + with console.status("Stopping device...", spinner="bouncingBall"): stop_ret = syn.Device(args.uri, args.verbose).stop_with_status() if not stop_ret: diff --git a/synapse/templates/app/ops/package/package.sh b/synapse/templates/app/ops/package/package.sh index 7c29dfaa..419c2c87 100755 --- a/synapse/templates/app/ops/package/package.sh +++ b/synapse/templates/app/ops/package/package.sh @@ -54,6 +54,19 @@ cp "${SCRIPT_DIR}/scripts/launch_synapse_app.sh" "${STAGING_DIR}/opt/scifi/scrip mkdir -p ${STAGING_DIR}/etc/systemd/system cp "${SCRIPT_DIR}/systemd/${SYNAPSE_APP_EXE}.service" "${STAGING_DIR}/etc/systemd/system/" +# --------------------------------------------------------------------------- +# Copy application manifest so the device can reference it later +# Destination: /opt/scifi/config/manifests/.json +# --------------------------------------------------------------------------- +MANIFEST_SRC="${SOURCE_DIR}/manifest.json" +if [ -f "${MANIFEST_SRC}" ]; then + MANIFEST_DST_DIR="${STAGING_DIR}/opt/scifi/config/manifests" + mkdir -p "${MANIFEST_DST_DIR}" + cp "${MANIFEST_SRC}" "${MANIFEST_DST_DIR}/${SYNAPSE_APP_EXE}.json" +else + echo "Warning: manifest.json not found at ${MANIFEST_SRC}; skipping copy." 
+fi + fpm -s dir -t deb \ -n "${SYNAPSE_APP_EXE}" \ -f \ From 4fc08ba21109e3f849de5a479a886204a42f1ef6 Mon Sep 17 00:00:00 2001 From: Emma Zhou Date: Fri, 9 May 2025 15:22:43 -0700 Subject: [PATCH 12/25] remove some priority stuff --- .../ops/package/scripts/launch_synapse_app.sh | 20 ------------------- .../app/ops/package/scripts/postinstall.sh | 1 - 2 files changed, 21 deletions(-) diff --git a/synapse/templates/app/ops/package/scripts/launch_synapse_app.sh b/synapse/templates/app/ops/package/scripts/launch_synapse_app.sh index c058ae38..e314f809 100755 --- a/synapse/templates/app/ops/package/scripts/launch_synapse_app.sh +++ b/synapse/templates/app/ops/package/scripts/launch_synapse_app.sh @@ -6,30 +6,10 @@ SYNAPSE_APP_EXE="{{APP_NAME}}" MANIFEST_FILE="/opt/scifi/config/app_manifest.json" -# set the process priority to something high -if ! renice -n -10 $$ > /dev/null 2>&1; then - echo "Failed to set process priority" - exit 1 -fi - -# Set CPU scheduler to FIFO -# Might drop this down if the system is unstable -if ! 
chrt -f -p 50 $$ > /dev/null 2>&1; then - echo "Failed to set CPU scheduler to FIFO" - exit 1 -fi - -# Note: Uncomment to set the CPU affinity to specific cores -# taskset -c 0-3 $$ > /dev/null 2>&1 - -# Set maximum locked memory to unlimited -# ulimit -l unlimited - # Set max UDP write buffer size to 4MB sysctl -w net.core.wmem_max=4194304 sysctl -w net.core.wmem_default=4194304 - # Set up LD_LIBRARY_PATH to prefer our local libraries and user libraries export LD_LIBRARY_PATH=/opt/scifi/usr-libs:/opt/scifi/lib:$LD_LIBRARY_PATH diff --git a/synapse/templates/app/ops/package/scripts/postinstall.sh b/synapse/templates/app/ops/package/scripts/postinstall.sh index f1b73e56..ef39fd3e 100755 --- a/synapse/templates/app/ops/package/scripts/postinstall.sh +++ b/synapse/templates/app/ops/package/scripts/postinstall.sh @@ -14,4 +14,3 @@ chmod 755 /opt/scifi/bin/"${SYNAPSE_APP_EXE}" # Reload and start the service systemctl daemon-reload -systemctl enable "${SYNAPSE_APP_EXE}" \ No newline at end of file From e5e834eb73ea81d0746050320c8ad450cf5d5738 Mon Sep 17 00:00:00 2001 From: Gilbert Montague Date: Fri, 9 May 2025 15:42:06 -0700 Subject: [PATCH 13/25] updated package to include synapse app sdk --- synapse/templates/app/ops/package/package.sh | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/synapse/templates/app/ops/package/package.sh b/synapse/templates/app/ops/package/package.sh index 419c2c87..e5aa73f6 100755 --- a/synapse/templates/app/ops/package/package.sh +++ b/synapse/templates/app/ops/package/package.sh @@ -35,7 +35,7 @@ done if [ "$BINARY_FOUND" = false ]; then echo "Binary not found in standard build directories, searching source directory..." 
BINARY_PATH=$(find "${SOURCE_DIR}" -name "${SYNAPSE_APP_EXE}" -type f | grep -v "${STAGING_DIR}" | head -n 1) - + if [ -n "$BINARY_PATH" ]; then echo "Found binary at ${BINARY_PATH}" cp "${BINARY_PATH}" "${STAGING_DIR}/opt/scifi/bin/" @@ -54,6 +54,12 @@ cp "${SCRIPT_DIR}/scripts/launch_synapse_app.sh" "${STAGING_DIR}/opt/scifi/scrip mkdir -p ${STAGING_DIR}/etc/systemd/system cp "${SCRIPT_DIR}/systemd/${SYNAPSE_APP_EXE}.service" "${STAGING_DIR}/etc/systemd/system/" +# App SDK +APP_SDK_DOCKER_DIR="/usr/lib/" +APP_SDK_LIB_TARGET_DIR="${STAGING_DIR}/opt/scifi/lib" +mkdir -p ${APP_SDK_LIB_TARGET_DIR} +find "${APP_SDK_DOCKER_DIR}" -name "libsynapse*.so*" -type f -exec cp -v {} ${APP_SDK_LIB_TARGET_DIR}/ \; + # --------------------------------------------------------------------------- # Copy application manifest so the device can reference it later # Destination: /opt/scifi/config/manifests/.json @@ -80,4 +86,4 @@ fpm -s dir -t deb \ --after-install "${SCRIPT_DIR}/scripts/postinstall.sh" \ --before-remove "${SCRIPT_DIR}/scripts/preremove.sh" \ --after-remove "${SCRIPT_DIR}/scripts/postremove.sh" \ - . \ No newline at end of file + . 
\ No newline at end of file From 34bf82aa4cbb14f9f5e66ebaa705cdd9ace33edf Mon Sep 17 00:00:00 2001 From: Gilbert Montague Date: Fri, 9 May 2025 16:43:58 -0700 Subject: [PATCH 14/25] Fix sdk copy --- synapse/templates/app/ops/package/package.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/templates/app/ops/package/package.sh b/synapse/templates/app/ops/package/package.sh index e5aa73f6..515d2626 100755 --- a/synapse/templates/app/ops/package/package.sh +++ b/synapse/templates/app/ops/package/package.sh @@ -58,7 +58,7 @@ cp "${SCRIPT_DIR}/systemd/${SYNAPSE_APP_EXE}.service" "${STAGING_DIR}/etc/system APP_SDK_DOCKER_DIR="/usr/lib/" APP_SDK_LIB_TARGET_DIR="${STAGING_DIR}/opt/scifi/lib" mkdir -p ${APP_SDK_LIB_TARGET_DIR} -find "${APP_SDK_DOCKER_DIR}" -name "libsynapse*.so*" -type f -exec cp -v {} ${APP_SDK_LIB_TARGET_DIR}/ \; +find "${APP_SDK_DOCKER_DIR}" -name "libsynapse*.so*" -exec cp -av {} ${APP_SDK_LIB_TARGET_DIR}/ \; # --------------------------------------------------------------------------- # Copy application manifest so the device can reference it later From d98898ba97d1abfde9eace90acb1c1dffa8379c0 Mon Sep 17 00:00:00 2001 From: Emma Zhou Date: Fri, 9 May 2025 17:23:48 -0700 Subject: [PATCH 15/25] affordance for configure + start --- synapse/cli/rpc.py | 51 ++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 43 insertions(+), 8 deletions(-) diff --git a/synapse/cli/rpc.py b/synapse/cli/rpc.py index 6453fe9a..02ce8118 100644 --- a/synapse/cli/rpc.py +++ b/synapse/cli/rpc.py @@ -31,10 +31,15 @@ def add_commands(subparsers): c = subparsers.add_parser("start", help="Start the device or an application") c.add_argument( - "app_name", + "config_file", nargs="?", default=None, - help="Name of the application as specified in its manifest.json. 
If provided, the tool will attempt to locate the manifest, configure the device using its 'device_configuration', and then start the device.", + help=( + "Optional path to a device configuration JSON file. If supplied, " + "the CLI first uploads the configuration, then starts the device. " + "Running `synapsectl start` with no argument simply starts the " + "device without re-configuring it." + ), ) c.set_defaults(func=start) @@ -157,14 +162,44 @@ def start(args): console = Console() - if getattr(args, "app_name", None): - console.print( - f"[yellow]Starting device; application '{args.app_name}' will be " - "started by the on-device ApplicationController.[/yellow]" - ) + config_obj = None # syn.Config if we are provided a *.json* file + cfg_path = getattr(args, "config_file", None) + + if cfg_path: + if Path(cfg_path).suffix != ".json": + console.print("[bold red]Configuration file must be a JSON file (.json)") + return + + if not Path(cfg_path).is_file(): + console.print(f"[bold red]Configuration file {cfg_path} does not exist") + return + + # Load the configuration proto and build Config object + try: + with open(cfg_path, "r") as f: + json_text = f.read() + cfg_proto = Parse(json_text, DeviceConfiguration()) + config_obj = syn.Config.from_proto(cfg_proto) + except Exception as e: + console.print(f"[bold red]Failed to parse configuration file[/bold red]: {e}") + return + + device = syn.Device(args.uri, args.verbose) + + # If we have a configuration, apply it first. 
+ if config_obj is not None: + with console.status("Configuring device...", spinner="bouncingBall"): + cfg_ret = device.configure_with_status(config_obj) + if cfg_ret is None: + console.print("[bold red]Internal error configuring device") + return + if cfg_ret.code != StatusCode.kOk: + console.print(f"[bold red]Error configuring device[/bold red]\n{cfg_ret.message}") + return + console.print("[green]Device Configured") with console.status("Starting device...", spinner="bouncingBall"): - start_ret = syn.Device(args.uri, args.verbose).start_with_status() + start_ret = device.start_with_status() if start_ret is None: console.print("[bold red]Internal error starting device") return From 38ffa1639adaccbcdce25d8592afe3d5d7387aa9 Mon Sep 17 00:00:00 2001 From: Emma Zhou Date: Fri, 9 May 2025 17:43:07 -0700 Subject: [PATCH 16/25] keep app manifests entirely client-side --- synapse/templates/app/ops/package/package.sh | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/synapse/templates/app/ops/package/package.sh b/synapse/templates/app/ops/package/package.sh index 515d2626..61ab7b3d 100755 --- a/synapse/templates/app/ops/package/package.sh +++ b/synapse/templates/app/ops/package/package.sh @@ -60,19 +60,6 @@ APP_SDK_LIB_TARGET_DIR="${STAGING_DIR}/opt/scifi/lib" mkdir -p ${APP_SDK_LIB_TARGET_DIR} find "${APP_SDK_DOCKER_DIR}" -name "libsynapse*.so*" -exec cp -av {} ${APP_SDK_LIB_TARGET_DIR}/ \; -# --------------------------------------------------------------------------- -# Copy application manifest so the device can reference it later -# Destination: /opt/scifi/config/manifests/.json -# --------------------------------------------------------------------------- -MANIFEST_SRC="${SOURCE_DIR}/manifest.json" -if [ -f "${MANIFEST_SRC}" ]; then - MANIFEST_DST_DIR="${STAGING_DIR}/opt/scifi/config/manifests" - mkdir -p "${MANIFEST_DST_DIR}" - cp "${MANIFEST_SRC}" "${MANIFEST_DST_DIR}/${SYNAPSE_APP_EXE}.json" -else - echo "Warning: manifest.json not found at 
${MANIFEST_SRC}; skipping copy." -fi - fpm -s dir -t deb \ -n "${SYNAPSE_APP_EXE}" \ -f \ From 575a7dfe523eba2571e6e60e00673f514a5078d1 Mon Sep 17 00:00:00 2001 From: Gilbert Montague Date: Fri, 9 May 2025 17:43:20 -0700 Subject: [PATCH 17/25] Remove check for docker build image --- synapse/cli/deploy.py | 85 +++++++++++++++++++++---------------------- 1 file changed, 42 insertions(+), 43 deletions(-) diff --git a/synapse/cli/deploy.py b/synapse/cli/deploy.py index 857dbdb9..5ddb8543 100644 --- a/synapse/cli/deploy.py +++ b/synapse/cli/deploy.py @@ -94,13 +94,13 @@ def package_app(app_dir, app_name): # We're outside Docker, need to use docker to package # Always use the build_docker.sh from the synapse-python package directly build_docker_script = get_build_docker_script() - + if not os.path.exists(build_docker_script): console.print( f"[bold red]Error:[/bold red] Could not find Docker build script at {build_docker_script}" ) return False - + # Make sure the script is executable os.chmod(build_docker_script, 0o755) @@ -129,13 +129,17 @@ def package_app(app_dir, app_name): ) # Log any warnings emitted by the build step so they are not lost - if result.stderr and any(word in result.stderr.lower() for word in ("error", "fail")): + if result.stderr and any( + word in result.stderr.lower() for word in ("error", "fail") + ): console.print(f"[bold red]Warning:[/bold red] {result.stderr}") # Mark the *build* step as complete progress.update(build_task, advance=1) except subprocess.CalledProcessError as exc: - console.print(f"[bold red]Error:[/bold red] Failed to build Docker image: {exc}") + console.print( + f"[bold red]Error:[/bold red] Failed to build Docker image: {exc}" + ) if exc.stderr: console.print(f"[red]{exc.stderr}[/red]") return False @@ -143,11 +147,13 @@ def package_app(app_dir, app_name): # ------------------------------------------------------------------ # STEP 2: Package the application inside the freshly-built container # 
------------------------------------------------------------------ - package_task = progress.add_task("[yellow]Packaging application...", total=1) + package_task = progress.add_task( + "[yellow]Packaging application...", total=1 + ) tag_suffix = detect_arch() image = f"{os.path.basename(app_dir)}:latest-{tag_suffix}" - + # Compose a bash script that prepares the template files and then runs the # packaging script. All placeholder replacements and SOURCE_DIR # overrides happen entirely inside the container so that nothing ever @@ -430,7 +436,7 @@ def deploy_package(ip_address, deb_package_path): sftp.close() if client: client.close() - except: + except Exception: pass @@ -515,43 +521,31 @@ def build_app(app_dir, app_name): # Image name image = f"{os.path.basename(app_dir)}:latest-{tag_suffix}" - # Check if Docker image exists - try: - subprocess.run( - ["docker", "image", "inspect", image], - check=True, - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL, - ) - console.print(f"[green]Docker image {image} already exists.[/green]") - except subprocess.CalledProcessError: - # Docker image doesn't exist, build it + # Docker image doesn't exist, build it + console.print( + f"[yellow]Docker image {image} not found, building it first...[/yellow]" + ) + + # Always use the shared build_docker.sh script + build_docker_script = get_build_docker_script() + + if not os.path.exists(build_docker_script): console.print( - f"[yellow]Docker image {image} not found, building it first...[/yellow]" + f"[bold red]Error:[/bold red] Could not find Docker build script at {build_docker_script}" ) - - # Always use the shared build_docker.sh script - build_docker_script = get_build_docker_script() - - if not os.path.exists(build_docker_script): - console.print( - f"[bold red]Error:[/bold red] Could not find Docker build script at {build_docker_script}" - ) - return False - - # Make sure the script is executable - os.chmod(build_docker_script, 0o755) - - try: - # Run the build script 
without capturing output so user can see progress - console.print("[blue]Running build_docker.sh...[/blue]") - subprocess.run(["bash", build_docker_script], check=True, cwd=app_dir) - console.print("[green]Successfully built Docker image.[/green]") - except subprocess.CalledProcessError as e: - console.print( - f"[bold red]Error:[/bold red] Failed to build Docker image: {e}" - ) - return False + return False + + # Make sure the script is executable + os.chmod(build_docker_script, 0o755) + + try: + # Run the build script without capturing output so user can see progress + console.print("[blue]Running build_docker.sh...[/blue]") + subprocess.run(["bash", build_docker_script], check=True, cwd=app_dir) + console.print("[green]Successfully built Docker image.[/green]") + except subprocess.CalledProcessError as e: + console.print(f"[bold red]Error:[/bold red] Failed to build Docker image: {e}") + return False # Now build the app in Docker console.print("[yellow]Building application in Docker container...[/yellow]") @@ -711,10 +705,12 @@ def add_commands(subparsers): ) deploy_parser.set_defaults(func=deploy_cmd) + # --------------------------------------------------------------------------- # Helper utilities shared across this module # --------------------------------------------------------------------------- + def get_synapse_root() -> str: """Return the absolute path to the *synapse-python* repository root.""" script_dir = os.path.dirname(os.path.abspath(__file__)) @@ -723,7 +719,9 @@ def get_synapse_root() -> str: def get_build_docker_script() -> str: """Return the canonical *build_docker.sh* path used throughout the tool.""" - return os.path.join(get_synapse_root(), "synapse", "templates", "app", "build_docker.sh") + return os.path.join( + get_synapse_root(), "synapse", "templates", "app", "build_docker.sh" + ) def get_template_ops_dir() -> str: @@ -741,6 +739,7 @@ def detect_arch() -> str: # Environment sanity-check helpers # 
--------------------------------------------------------------------------- + def ensure_docker() -> bool: """Return True if the *docker* CLI is available and the daemon responds. From 939fb7b1a52984fb5684dbec8967e3119b596364 Mon Sep 17 00:00:00 2001 From: Sage Date: Fri, 9 May 2025 18:07:05 -0700 Subject: [PATCH 18/25] cleanup sftp --- synapse/cli/deploy.py | 39 ++++++++++++--------------------------- 1 file changed, 12 insertions(+), 27 deletions(-) diff --git a/synapse/cli/deploy.py b/synapse/cli/deploy.py index 5ddb8543..e49c372c 100644 --- a/synapse/cli/deploy.py +++ b/synapse/cli/deploy.py @@ -16,6 +16,8 @@ from rich.prompt import Prompt from rich import box +import synapse.client.sftp as sftp + # Set up console for normal output and a separate one for logs console = Console() log_console = Console(stderr=True) @@ -304,31 +306,18 @@ def deploy_package(ip_address, deb_package_path): ) try: - # Deploy directly using paramiko - client = None - sftp = None shell = None - # Create SSH client - import paramiko - - client = paramiko.SSHClient() - client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - # Connect to the device (connection task) connect_task = progress.add_task("[green]Connecting to device...", total=1) - - try: - client.connect( - ip_address, username=username, password=login_password, timeout=10 - ) - progress.update(connect_task, completed=1) - progress.update(deploy_task, advance=1) - except Exception as e: + client, sftp_conn = sftp.connect_sftp( + hostname=ip_address, username=username, password=login_password + ) + progress.update(connect_task, completed=1) + progress.update(deploy_task, advance=1) + if client is None or sftp_conn is None: progress.update(connect_task, visible=False) - console.print( - f"[bold red]Error connecting to {ip_address}:[/bold red] {str(e)}" - ) + console.print(f"[bold red]Error connecting to {ip_address}[/bold red]") console.print( "[yellow]Please check your username and password.[/yellow]" ) @@ -339,9 +328,8 
@@ def deploy_package(ip_address, deb_package_path): try: # Create SFTP client and upload - sftp = client.open_sftp() remote_path = f"/tmp/{package_filename}" - sftp.put(deb_package_path, remote_path) + sftp_conn.put(deb_package_path, remote_path) progress.update(upload_task, completed=1) progress.update(deploy_task, advance=1) except Exception as e: @@ -430,12 +418,9 @@ def deploy_package(ip_address, deb_package_path): finally: # Clean up connections try: - if shell: + sftp.close_sftp(client, sftp_conn) + if shell is not None: shell.close() - if sftp: - sftp.close() - if client: - client.close() except Exception: pass From fec1b65efb1401330e05d067c4b6ba6a63bcfa19 Mon Sep 17 00:00:00 2001 From: Emma Zhou Date: Fri, 9 May 2025 18:08:32 -0700 Subject: [PATCH 19/25] move inline bash into a file --- synapse/cli/deploy.py | 49 ++++++------------- .../templates/app/ops/prepare_and_package.sh | 48 ++++++++++++++++++ 2 files changed, 62 insertions(+), 35 deletions(-) create mode 100755 synapse/templates/app/ops/prepare_and_package.sh diff --git a/synapse/cli/deploy.py b/synapse/cli/deploy.py index 5ddb8543..dd6aaf84 100644 --- a/synapse/cli/deploy.py +++ b/synapse/cli/deploy.py @@ -154,39 +154,18 @@ def package_app(app_dir, app_name): tag_suffix = detect_arch() image = f"{os.path.basename(app_dir)}:latest-{tag_suffix}" - # Compose a bash script that prepares the template files and then runs the - # packaging script. All placeholder replacements and SOURCE_DIR - # overrides happen entirely inside the container so that nothing ever - # gets written back to the application repository. 
- - bash_cmd = f''' -set -e -APP_NAME="{app_name}" - -# Copy the template ops directory to a temporary working area inside the container -TEMPLATE_DIR="/synapse_ops" -TEMP_DIR="/tmp/synapse_package_ops" -rm -rf "$TEMP_DIR" -cp -r "$TEMPLATE_DIR" "$TEMP_DIR" - -# Replace placeholders in every file -find "$TEMP_DIR" -type f -exec sed -i 's/{{{{APP_NAME}}}}/'"$APP_NAME"'/g' {{}} + - -# Rename the systemd service template to the correct name -# The template file is literally called "{{APP_NAME}}.service" (double braces). -if [ -f "$TEMP_DIR/package/systemd/{{{{APP_NAME}}}}.service" ]; then - mv "$TEMP_DIR/package/systemd/{{{{APP_NAME}}}}.service" "$TEMP_DIR/package/systemd/$APP_NAME.service" -fi - -# Ensure the packaging script is executable and points to the correct source dir -chmod +x "$TEMP_DIR/package/package.sh" -sed -i 's|SOURCE_DIR=.*|SOURCE_DIR="/home/workspace"|' "$TEMP_DIR/package/package.sh" - -# Finally, run the packaging script from the workspace root so that the .deb lands -# in the application directory that is mounted from the host. -cd /home/workspace -bash "$TEMP_DIR/package/package.sh" -''' + # Ensure the helper script exists and is executable on the host so it + # can be executed inside the container. + prepare_script = os.path.join(template_ops_dir, "prepare_and_package.sh") + if not os.path.exists(prepare_script): + console.print( + f"[bold red]Error:[/bold red] Helper script not found at {prepare_script}" + ) + return False + + # Make sure the script has execute permissions (mostly relevant for + # Windows or freshly-cloned repos). 
+ os.chmod(prepare_script, 0o755) cmd = [ "docker", @@ -199,8 +178,8 @@ def package_app(app_dir, app_name): f"{template_ops_dir}:/synapse_ops:ro", image, "/bin/bash", - "-c", - bash_cmd, + "/synapse_ops/prepare_and_package.sh", + app_name, ] # Run the packaging script in Docker - capture output diff --git a/synapse/templates/app/ops/prepare_and_package.sh b/synapse/templates/app/ops/prepare_and_package.sh new file mode 100755 index 00000000..ccbbdd40 --- /dev/null +++ b/synapse/templates/app/ops/prepare_and_package.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +set -e + +# --------------------------------------------------------------------------- +# prepare_and_package.sh +# --------------------------------------------------------------------------- +# This script copies the shared *ops* template directory to a temporary working +# location, replaces placeholder tokens with the supplied application name, and +# then runs the main `package.sh` script so that a Debian package is produced. +# +# Usage (inside the Docker packaging container): +# /synapse_ops/prepare_and_package.sh +# --------------------------------------------------------------------------- + +if [[ $# -ne 1 ]]; then + echo "Usage: $0 " >&2 + exit 1 +fi + +APP_NAME="$1" + +# The template directory is mounted read-only by the CLI. Work on a copy so we +# can make in-place edits while preserving the pristine original. +TEMPLATE_DIR="/synapse_ops" +TEMP_DIR="/tmp/synapse_package_ops" +rm -rf "$TEMP_DIR" +cp -r "$TEMPLATE_DIR" "$TEMP_DIR" + +# Replace all occurrences of the placeholder token in every file of the copied +# template directory. +find "$TEMP_DIR" -type f -exec sed -i "s/{{APP_NAME}}/${APP_NAME}/g" {} + + +# The systemd service file itself has the placeholder in its *filename* so we +# need to rename it as well. 
+SERVICE_TEMPLATE="$TEMP_DIR/package/systemd/{{APP_NAME}}.service" +if [[ -f "$SERVICE_TEMPLATE" ]]; then + mv "$SERVICE_TEMPLATE" "$TEMP_DIR/package/systemd/${APP_NAME}.service" +fi + +# The main packaging script expects SOURCE_DIR to point at the application +# workspace mounted from the host. Ensure the variable is set correctly and +# that the script is executable. +chmod +x "$TEMP_DIR/package/package.sh" +sed -i 's|SOURCE_DIR=.*|SOURCE_DIR="/home/workspace"|' "$TEMP_DIR/package/package.sh" + +# Kick off the actual `.deb` creation process. +cd /home/workspace +bash "$TEMP_DIR/package/package.sh" \ No newline at end of file From f5545e5a8f0aaafb26ecc5bb5d522b58ca192265 Mon Sep 17 00:00:00 2001 From: Emma Zhou Date: Fri, 9 May 2025 19:15:53 -0700 Subject: [PATCH 20/25] move everything into python, remove shell scripts --- synapse/cli/deploy.py | 572 +++++++++++------- synapse/templates/app/build_docker.sh | 37 -- synapse/templates/app/ops/package/package.sh | 76 --- .../ops/package/scripts/launch_synapse_app.sh | 24 - .../app/ops/package/scripts/postinstall.sh | 16 - .../app/ops/package/scripts/postremove.sh | 5 - .../app/ops/package/scripts/preremove.sh | 9 - .../ops/package/systemd/{{APP_NAME}}.service | 27 - .../templates/app/ops/prepare_and_package.sh | 48 -- 9 files changed, 340 insertions(+), 474 deletions(-) delete mode 100755 synapse/templates/app/build_docker.sh delete mode 100755 synapse/templates/app/ops/package/package.sh delete mode 100755 synapse/templates/app/ops/package/scripts/launch_synapse_app.sh delete mode 100755 synapse/templates/app/ops/package/scripts/postinstall.sh delete mode 100755 synapse/templates/app/ops/package/scripts/postremove.sh delete mode 100755 synapse/templates/app/ops/package/scripts/preremove.sh delete mode 100644 synapse/templates/app/ops/package/systemd/{{APP_NAME}}.service delete mode 100755 synapse/templates/app/ops/prepare_and_package.sh diff --git a/synapse/cli/deploy.py b/synapse/cli/deploy.py index 
e9f95080..0ddebb54 100644 --- a/synapse/cli/deploy.py +++ b/synapse/cli/deploy.py @@ -2,15 +2,15 @@ import subprocess import shutil import json -import time import logging +import tempfile +import glob from rich.console import Console from rich.panel import Panel from rich.progress import ( Progress, SpinnerColumn, TextColumn, - BarColumn, TimeElapsedColumn, ) from rich.prompt import Prompt @@ -50,166 +50,202 @@ def validate_manifest(manifest_path): return False -def package_app(app_dir, app_name): - """Package the application into a .deb file""" - # Check if we're in a Docker container - if os.path.exists("/.dockerenv"): - # We're inside Docker, directly run the package script - package_script = os.path.join(app_dir, "ops", "package", "package.sh") - if not os.path.exists(package_script): - console.print( - f"[bold red]Error:[/bold red] Package script not found at {package_script}" - ) - return False +def build_deb_package(app_dir: str, app_name: str, version: str = "0.1.0") -> bool: + """Create a *.deb* package for *app_name* and place it in *app_dir*. 
- # Make sure the script is executable - os.chmod(package_script, 0o755) - - # Make sure all the other scripts are executable too - script_dir = os.path.join(app_dir, "ops", "package", "scripts") - if os.path.exists(script_dir): - for script in os.listdir(script_dir): - if script.endswith(".sh"): - script_path = os.path.join(script_dir, script) - os.chmod(script_path, 0o755) - - with Progress( - SpinnerColumn(), - TextColumn("[bold blue]{task.description}[/bold blue]"), - BarColumn(), - TimeElapsedColumn(), - console=console, - ) as progress: - task = progress.add_task("[yellow]Packaging application...", total=1) - - # Run the package script - try: - subprocess.run(["bash", package_script], check=True, cwd=app_dir) - progress.update(task, advance=1) - return True - except subprocess.CalledProcessError as e: - console.print( - f"[bold red]Error:[/bold red] Failed to package application: {e}" - ) - return False - else: - # We're outside Docker, need to use docker to package - # Always use the build_docker.sh from the synapse-python package directly - build_docker_script = get_build_docker_script() + Returns ``True`` on success, ``False`` otherwise. + """ - if not os.path.exists(build_docker_script): - console.print( - f"[bold red]Error:[/bold red] Could not find Docker build script at {build_docker_script}" - ) - return False + try: + staging_dir = tempfile.mkdtemp(prefix="synapse-package-") - # Make sure the script is executable - os.chmod(build_docker_script, 0o755) + # ------------------------------------------------------------------ + # 1. 
Locate the compiled binary and copy it to /opt/scifi/bin + # ------------------------------------------------------------------ + possible_bins = [ + os.path.join(app_dir, "build-aarch64", app_name), + os.path.join(app_dir, "build", app_name), + os.path.join(app_dir, "build-arm64", app_name), + ] - # Path to ops templates inside synapse-python - template_ops_dir = get_template_ops_dir() + binary_path = next((p for p in possible_bins if os.path.exists(p)), None) - with Progress( - SpinnerColumn(), - TextColumn("[bold blue]{task.description}[/bold blue]"), - BarColumn(), - TimeElapsedColumn(), - console=console, - ) as progress: - build_task = progress.add_task("[yellow]Building Docker image...", total=1) + if binary_path is None: + console.print( + f"[bold red]Error:[/bold red] Compiled binary '{app_name}' not found." + ) + return False - # ------------------------------------------------------------------ - # STEP 1: Build the Docker image used for packaging the application - # ------------------------------------------------------------------ + bin_dst_dir = os.path.join(staging_dir, "opt", "scifi", "bin") + os.makedirs(bin_dst_dir, exist_ok=True) + shutil.copy2(binary_path, os.path.join(bin_dst_dir, app_name)) + + # ------------------------------------------------------------------ + # 2. 
Generate systemd service & lifecycle scripts on the fly + # ------------------------------------------------------------------ + + # Generate systemd unit + svc_content = f"""[Unit] +Description=Synapse Application +After=network-online.target +Wants=network-online.target +Requires=systemd-udevd.service +After=systemd-udevd.service + +[Service] +Type=simple +User=root +Restart=no +ExecStartPre=/sbin/sysctl -w net.core.wmem_max=4194304 +ExecStartPre=/sbin/sysctl -w net.core.wmem_default=4194304 +Environment=LD_LIBRARY_PATH=/opt/scifi/usr-libs:/opt/scifi/lib +Environment=SCIFI_ROOT=/opt/scifi +ExecStart=/opt/scifi/bin/{app_name} +WorkingDirectory=/opt/scifi + +[Install] +WantedBy=multi-user.target +""" + + svc_dst = os.path.join( + staging_dir, "etc", "systemd", "system", f"{app_name}.service" + ) + os.makedirs(os.path.dirname(svc_dst), exist_ok=True) + with open(svc_dst, "w", encoding="utf-8") as f: + f.write(svc_content) + + lifecycle_scripts_tmp = [] + + postinstall_path = os.path.join(staging_dir, "postinstall.sh") + with open(postinstall_path, "w", encoding="utf-8") as f: + f.write("#!/bin/bash\nset -e\nsystemctl daemon-reload\n") + os.chmod(postinstall_path, 0o755) + lifecycle_scripts_tmp.append(postinstall_path) + + preremove_path = os.path.join(staging_dir, "preremove.sh") + with open(preremove_path, "w", encoding="utf-8") as f: + f.write( + f"#!/bin/bash\nset -e\nsystemctl stop {app_name} || true\nsystemctl disable {app_name} || true\n" + ) + os.chmod(preremove_path, 0o755) + lifecycle_scripts_tmp.append(preremove_path) + + postremove_path = os.path.join(staging_dir, "postremove.sh") + with open(postremove_path, "w", encoding="utf-8") as f: + f.write("#!/bin/bash\nset -e\nsystemctl daemon-reload\n") + os.chmod(postremove_path, 0o755) + lifecycle_scripts_tmp.append(postremove_path) + + # ------------------------------------------------------------------ + # 3. 
Copy user-space Synapse SDK shared libs + # ------------------------------------------------------------------ + lib_dst_dir = os.path.join(staging_dir, "opt", "scifi", "lib") + os.makedirs(lib_dst_dir, exist_ok=True) + for lib in glob.glob("/usr/lib/libsynapse*.so*"): try: - result = subprocess.run( - ["bash", build_docker_script], - check=True, - cwd=app_dir, - capture_output=True, - text=True, - ) - - # Log any warnings emitted by the build step so they are not lost - if result.stderr and any( - word in result.stderr.lower() for word in ("error", "fail") - ): - console.print(f"[bold red]Warning:[/bold red] {result.stderr}") - - # Mark the *build* step as complete - progress.update(build_task, advance=1) - except subprocess.CalledProcessError as exc: - console.print( - f"[bold red]Error:[/bold red] Failed to build Docker image: {exc}" - ) - if exc.stderr: - console.print(f"[red]{exc.stderr}[/red]") - return False - - # ------------------------------------------------------------------ - # STEP 2: Package the application inside the freshly-built container - # ------------------------------------------------------------------ - package_task = progress.add_task( - "[yellow]Packaging application...", total=1 + shutil.copy2(lib, lib_dst_dir) + except PermissionError: + console.print(f"[yellow]Skipping lib copy (perm): {lib}[/yellow]") + + # ------------------------------------------------------------------ + # 4. 
Build the .deb with FPM + # ------------------------------------------------------------------ + arch = detect_arch() + + fpm_cmd = [ + "fpm", + "-s", + "dir", + "-t", + "deb", + "-n", + app_name, + "-f", + "-v", + version, + "-C", + staging_dir, + "--deb-no-default-config-files", + "--depends", + "systemd", + "--vendor", + "Science Corporation", + "--description", + "Synapse Application", + "--architecture", + arch, + ] + + # Attach lifecycle scripts (referenced relative to /pkg inside container) + script_map = { + "postinstall.sh": "--after-install", + "preremove.sh": "--before-remove", + "postremove.sh": "--after-remove", + } + for path in lifecycle_scripts_tmp: + opt = script_map.get(os.path.basename(path)) + if opt: + container_path = f"/pkg/{os.path.basename(path)}" + fpm_cmd.extend([opt, container_path]) + + fpm_cmd.append(".") + + # ------------------------------------------------------------------ + # 5. Invoke FPM in a Docker container (consistent across hosts) + # ------------------------------------------------------------------ + + fpm_image = "cdrx/fpm-ubuntu:latest" + console.print(f"[yellow]Running FPM (Docker image: {fpm_image}) ...[/yellow]") + + # Replace the host-specific staging dir with the container mount path + fpm_args = fpm_cmd[1:] + try: + c_index = fpm_args.index("-C") + 1 + fpm_args[c_index] = "/pkg" + except ValueError: + pass + + docker_fpm_cmd = [ + "docker", + "run", + "--rm", + "--platform", + "linux/amd64", + "-v", + f"{staging_dir}:/pkg", + "-v", + f"{app_dir}:/out", + "-w", + "/out", + fpm_image, + "fpm", + ] + fpm_args + + subprocess.run(docker_fpm_cmd, check=True) + + # Verify that a .deb was produced + deb_files = [f for f in os.listdir(app_dir) if f.endswith(".deb")] + if not deb_files: + console.print( + f"[bold red]Error:[/bold red] FPM completed but no .deb found in {app_dir}." 
) + return False - tag_suffix = detect_arch() - image = f"{os.path.basename(app_dir)}:latest-{tag_suffix}" + console.print("[green]Package created successfully![/green]") + return True - # Ensure the helper script exists and is executable on the host so it - # can be executed inside the container. - prepare_script = os.path.join(template_ops_dir, "prepare_and_package.sh") - if not os.path.exists(prepare_script): - console.print( - f"[bold red]Error:[/bold red] Helper script not found at {prepare_script}" - ) - return False + except subprocess.CalledProcessError as exc: + console.print(f"[bold red]Error:[/bold red] FPM failed: {exc}") + return False - # Make sure the script has execute permissions (mostly relevant for - # Windows or freshly-cloned repos). - os.chmod(prepare_script, 0o755) - - cmd = [ - "docker", - "run", - "-i", - "--rm", - "-v", - f"{os.path.abspath(app_dir)}:/home/workspace", - "-v", - f"{template_ops_dir}:/synapse_ops:ro", - image, - "/bin/bash", - "/synapse_ops/prepare_and_package.sh", - app_name, - ] - - # Run the packaging script in Docker - capture output - print(f"Running packaging script in Docker: {image}") - try: - # Capture output to prevent it from interfering with progress bars - result = subprocess.run( - cmd, check=True, cwd=app_dir, capture_output=True, text=True - ) + finally: + pass - # Only log errors if they occur - if result.stderr and ( - "error" in result.stderr.lower() or "fail" in result.stderr.lower() - ): - console.print(f"[bold red]Warning:[/bold red] {result.stderr}") - progress.update(package_task, advance=1) +def package_app(app_dir, app_name): + """Package *app_name* into a .deb using the pure-Python builder.""" - # Display success message after completion - console.print("[green]Package created successfully![/green]") - return True - except subprocess.CalledProcessError as e: - console.print( - f"[bold red]Error:[/bold red] Failed to package application: {e}" - ) - if e.stderr: - 
console.print(f"[red]{e.stderr}[/red]") - return False + return build_deb_package(app_dir, app_name) def find_deb_package(app_dir): @@ -256,8 +292,6 @@ def get_device_credentials(ip_address): def deploy_package(ip_address, deb_package_path): """Deploy the package to the device""" package_filename = os.path.basename(deb_package_path) - - # Stop any previous progress display console.clear_live() # Get cached credentials or prompt for new ones @@ -276,9 +310,10 @@ def deploy_package(ip_address, deb_package_path): with Progress( SpinnerColumn(), TextColumn("[bold blue]{task.description}[/bold blue]"), - BarColumn(), TimeElapsedColumn(), console=console, + transient=True, + refresh_per_second=4, ) as progress: deploy_task = progress.add_task( f"[yellow]Deploying to {ip_address}...", total=3 @@ -318,37 +353,105 @@ def deploy_package(ip_address, deb_package_path): # Install task install_task = progress.add_task("[magenta]Installing package...", total=1) + progress.stop() try: - # Use expect-like behavior with Paramiko to handle su - shell = client.invoke_shell() - - # Set up a way to collect output - output = "" - - # Send su command - shell.send("su -\n") - time.sleep(1) # Wait for password prompt - - # Send root password - shell.send(f"{root_password}\n") - time.sleep(1) # Wait for su to authenticate - - # Send dpkg command - shell.send(f"dpkg -i {remote_path}\n") - time.sleep(5) # Give dpkg time to run - - # Exit from root shell - shell.send("exit\n") - time.sleep(0.5) - - # Collect the final output - while shell.recv_ready(): - chunk = shell.recv(4096).decode("utf-8") - output += chunk - - # Check for common error indicators - if "error" in output.lower() or "failed" in output.lower(): + import time + + def run_remote(cmd: str, needs_password: bool = False): + """Execute *cmd* over SSH, stream live output, and return (exit_status, full_output). 
+ + If *needs_password* is True the helper waits until a password + prompt is detected before writing *root_password* to *stdin*. + This behaves well for environments that rely solely on + *su* for privilege escalation because writing the + password too early can cause *su* to ignore it and block + indefinitely. + """ + stdin, stdout, stderr = client.exec_command(cmd, get_pty=True) + + output = "" + pw_sent = False + buf_out = "" + buf_err = "" + + def maybe_print(line: str, *, is_err: bool = False): + """Filter *line* and print if it should be visible.""" + clean = line.replace("\r", "") + + if "Reading database" in clean: + return + + if is_err: + log_console.print(clean, style="red", end="") + else: + log_console.print(clean, end="") + + while not stdout.channel.exit_status_ready(): + while stdout.channel.recv_ready(): + chunk = stdout.channel.recv(1024).decode(errors="replace") + output += chunk + + if ( + needs_password + and ("password" in chunk.lower()) + and not pw_sent + ): + stdin.write(root_password + "\n") + stdin.flush() + pw_sent = True + + buf_out += chunk + while "\n" in buf_out: + line, buf_out = buf_out.split("\n", 1) + maybe_print(line + "\n", is_err=False) + + while stderr.channel.recv_ready(): + chunk = stderr.channel.recv(1024).decode(errors="replace") + output += chunk + + if ( + needs_password + and ("password" in chunk.lower()) + and not pw_sent + ): + stdin.write(root_password + "\n") + stdin.flush() + pw_sent = True + + buf_err += chunk + while "\n" in buf_err: + line, buf_err = buf_err.split("\n", 1) + maybe_print(line + "\n", is_err=True) + + time.sleep(0.1) + + if buf_out: + maybe_print(buf_out, is_err=False) + buf_out = "" + if buf_err: + maybe_print(buf_err, is_err=True) + buf_err = "" + + output += stdout.read().decode() + output += stderr.read().decode() + exit_status = stdout.channel.recv_exit_status() + return exit_status, output + + # If we are already root, skip any privilege escalation + if username == "root": + esc_cmd = 
f"DEBIAN_FRONTEND=noninteractive dpkg -i {remote_path} && rm {remote_path}" + exit_status, output = run_remote(esc_cmd) + else: + # Elevate privileges with su (target devices never have sudo) + su_cmd = f"su -c 'env DEBIAN_FRONTEND=noninteractive dpkg -i {remote_path} && rm {remote_path}'" + exit_status, output = run_remote(su_cmd, needs_password=True) + + # Restart the live progress display now that installation is + # complete so subsequent updates render properly. + progress.start() + + if exit_status != 0: progress.update(install_task, visible=False) progress.update(deploy_task, visible=False) console.print( @@ -361,17 +464,14 @@ def deploy_package(ip_address, deb_package_path): ) return False - # Cleanup - shell.send(f"rm {remote_path}\n") - time.sleep(0.5) - - # Mark installation as complete progress.update(install_task, completed=1) progress.update(deploy_task, advance=1) - # Save successful credentials save_credentials(ip_address, username, login_password, root_password) + progress.stop() + console.clear_live() + console.print( Panel( f"[bold green]Successfully deployed[/bold green] [yellow]{package_filename}[/yellow] [bold green]to[/bold green] [blue]{ip_address}[/blue]", @@ -383,6 +483,7 @@ def deploy_package(ip_address, deb_package_path): return True except Exception as e: + progress.start() progress.update(install_task, visible=False) progress.update(deploy_task, visible=False) console.print( @@ -490,24 +591,10 @@ def build_app(app_dir, app_name): f"[yellow]Docker image {image} not found, building it first...[/yellow]" ) - # Always use the shared build_docker.sh script - build_docker_script = get_build_docker_script() - - if not os.path.exists(build_docker_script): - console.print( - f"[bold red]Error:[/bold red] Could not find Docker build script at {build_docker_script}" - ) - return False - - # Make sure the script is executable - os.chmod(build_docker_script, 0o755) - + # Build the Docker image directly via Python helper try: - # Run the build 
script without capturing output so user can see progress - console.print("[blue]Running build_docker.sh...[/blue]") - subprocess.run(["bash", build_docker_script], check=True, cwd=app_dir) - console.print("[green]Successfully built Docker image.[/green]") - except subprocess.CalledProcessError as e: + image = build_docker_image(app_dir, app_name) + except (subprocess.CalledProcessError, FileNotFoundError) as e: console.print(f"[bold red]Error:[/bold red] Failed to build Docker image: {e}") return False @@ -660,7 +747,6 @@ def deploy_cmd(args): def add_commands(subparsers): """Add deploy commands to the CLI""" - # Deploy command deploy_parser = subparsers.add_parser( "deploy", help="Deploy an application to a Synapse device" ) @@ -670,40 +756,12 @@ def add_commands(subparsers): deploy_parser.set_defaults(func=deploy_cmd) -# --------------------------------------------------------------------------- -# Helper utilities shared across this module -# --------------------------------------------------------------------------- - - -def get_synapse_root() -> str: - """Return the absolute path to the *synapse-python* repository root.""" - script_dir = os.path.dirname(os.path.abspath(__file__)) - return os.path.abspath(os.path.join(script_dir, "..", "..")) - - -def get_build_docker_script() -> str: - """Return the canonical *build_docker.sh* path used throughout the tool.""" - return os.path.join( - get_synapse_root(), "synapse", "templates", "app", "build_docker.sh" - ) - - -def get_template_ops_dir() -> str: - """Return the path to the shared *ops* template directory.""" - return os.path.join(get_synapse_root(), "synapse", "templates", "app", "ops") - - def detect_arch() -> str: """Return an architecture tag suffix (``arm64`` or ``amd64``).""" arch = subprocess.check_output(["uname", "-m"]).decode("utf-8").strip() return "arm64" if arch in ("arm64", "aarch64") else "amd64" -# --------------------------------------------------------------------------- -# Environment 
sanity-check helpers -# --------------------------------------------------------------------------- - - def ensure_docker() -> bool: """Return True if the *docker* CLI is available and the daemon responds. @@ -729,3 +787,53 @@ def ensure_docker() -> bool: "[bold red]Error:[/bold red] Docker daemon does not appear to be running. Please start Docker and try again." ) return False + + +def build_docker_image(app_dir: str, app_name: str | None = None) -> str: + """Build (or rebuild) the SDK Docker image used for cross-compiling *app_name*. + + Returns the fully-qualified image tag (``:latest-``) or raises + ``subprocess.CalledProcessError`` if the build fails. + """ + if app_name is None: + app_name = os.path.basename(app_dir) + + arch_suffix = detect_arch() # "arm64" or "amd64" + + # Pick an arch-specific Dockerfile if it exists, otherwise fall back to the + # generic one. + dockerfile_rel = ( + f"ops/docker/Dockerfile.{arch_suffix}" + if arch_suffix == "arm64" + else "ops/docker/Dockerfile" + ) + dockerfile_path = os.path.join(app_dir, dockerfile_rel) + if not os.path.exists(dockerfile_path): + # Last chance: fall back to the generic Dockerfile regardless of arch. + dockerfile_path = os.path.join(app_dir, "ops/docker/Dockerfile") + + if not os.path.exists(dockerfile_path): + raise FileNotFoundError( + f"Expected Dockerfile not found at {dockerfile_path}. " + "Ensure your application provides the required build Dockerfile(s)." 
+ ) + + image_tag = f"{app_name}:latest-{arch_suffix}" + + console.print(f"[yellow]Building Docker image [bold]{image_tag}[/bold]...[/yellow]") + subprocess.run( + [ + "docker", + "build", + "-t", + image_tag, + "-f", + dockerfile_path, + ".", + ], + check=True, + cwd=app_dir, + ) + + console.print(f"[green]Successfully built Docker image {image_tag}[/green]") + return image_tag diff --git a/synapse/templates/app/build_docker.sh b/synapse/templates/app/build_docker.sh deleted file mode 100755 index 082062fb..00000000 --- a/synapse/templates/app/build_docker.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash -set -e - -# App name from manifest.json or directory name -APP_NAME=$(basename "$(pwd)") -if [ -f "manifest.json" ]; then - # Try to extract the name from manifest.json - if command -v jq &> /dev/null; then - MANIFEST_NAME=$(jq -r '.name' manifest.json 2>/dev/null) - if [ -n "$MANIFEST_NAME" ] && [ "$MANIFEST_NAME" != "null" ]; then - APP_NAME=$MANIFEST_NAME - fi - fi -fi - -# Detect architecture -ARCH=$(uname -m) -if [[ "${ARCH}" == "arm64" || "${ARCH}" == "aarch64" ]]; then - CONTAINER_TAG="arm64" - PLATFORM="linux/arm64" - DOCKERFILE_PATH="ops/docker/Dockerfile.arm64" -else - CONTAINER_TAG="amd64" - PLATFORM="linux/amd64" - DOCKERFILE_PATH="ops/docker/Dockerfile" -fi - -# Image names -SDK_IMAGE="${APP_NAME}:latest-${CONTAINER_TAG}" - -echo "Building for architecture: $ARCH" -echo "Application name: $APP_NAME" - -# Build the SDK image -docker build -t $SDK_IMAGE -f "${DOCKERFILE_PATH}" . 
- -echo "Successfully built $SDK_IMAGE" diff --git a/synapse/templates/app/ops/package/package.sh b/synapse/templates/app/ops/package/package.sh deleted file mode 100755 index 61ab7b3d..00000000 --- a/synapse/templates/app/ops/package/package.sh +++ /dev/null @@ -1,76 +0,0 @@ -#!/bin/bash -set -e - -SYNAPSE_APP_VERSION="0.1.0" -SYNAPSE_APP_EXE="{{APP_NAME}}" - -SCRIPT_DIR=$(dirname "$0") -SOURCE_DIR="${SCRIPT_DIR}/../../" -# Check multiple possible build directories -BUILD_DIRS=( - "${SOURCE_DIR}/build-aarch64/" - "${SOURCE_DIR}/build/" - "${SOURCE_DIR}/build-arm64/" - "${SOURCE_DIR}/out/" -) - -STAGING_DIR="/tmp/synapse-package" -mkdir -p ${STAGING_DIR} - -# Binary install and setup -mkdir -p ${STAGING_DIR}/opt/scifi/bin - -# Try to find the binary in various possible build directories -BINARY_FOUND=false -for BUILD_DIR in "${BUILD_DIRS[@]}"; do - if [ -f "${BUILD_DIR}/${SYNAPSE_APP_EXE}" ]; then - echo "Found binary at ${BUILD_DIR}/${SYNAPSE_APP_EXE}" - cp "${BUILD_DIR}/${SYNAPSE_APP_EXE}" "${STAGING_DIR}/opt/scifi/bin/" - BINARY_FOUND=true - break - fi -done - -# If we didn't find the binary, try to find it anywhere in the source directory -if [ "$BINARY_FOUND" = false ]; then - echo "Binary not found in standard build directories, searching source directory..." - BINARY_PATH=$(find "${SOURCE_DIR}" -name "${SYNAPSE_APP_EXE}" -type f | grep -v "${STAGING_DIR}" | head -n 1) - - if [ -n "$BINARY_PATH" ]; then - echo "Found binary at ${BINARY_PATH}" - cp "${BINARY_PATH}" "${STAGING_DIR}/opt/scifi/bin/" - BINARY_FOUND=true - else - echo "ERROR: Could not find binary ${SYNAPSE_APP_EXE} in any build directory!" 
- exit 1 - fi -fi - -# Launch script -mkdir -p ${STAGING_DIR}/opt/scifi/scripts -cp "${SCRIPT_DIR}/scripts/launch_synapse_app.sh" "${STAGING_DIR}/opt/scifi/scripts/" - -# Systemd service install and setup -mkdir -p ${STAGING_DIR}/etc/systemd/system -cp "${SCRIPT_DIR}/systemd/${SYNAPSE_APP_EXE}.service" "${STAGING_DIR}/etc/systemd/system/" - -# App SDK -APP_SDK_DOCKER_DIR="/usr/lib/" -APP_SDK_LIB_TARGET_DIR="${STAGING_DIR}/opt/scifi/lib" -mkdir -p ${APP_SDK_LIB_TARGET_DIR} -find "${APP_SDK_DOCKER_DIR}" -name "libsynapse*.so*" -exec cp -av {} ${APP_SDK_LIB_TARGET_DIR}/ \; - -fpm -s dir -t deb \ - -n "${SYNAPSE_APP_EXE}" \ - -f \ - -v "${SYNAPSE_APP_VERSION}" \ - -C ${STAGING_DIR} \ - --deb-no-default-config-files \ - --depends "systemd" \ - --vendor "Science Corporation" \ - --description "Synapse Application" \ - --architecture arm64 \ - --after-install "${SCRIPT_DIR}/scripts/postinstall.sh" \ - --before-remove "${SCRIPT_DIR}/scripts/preremove.sh" \ - --after-remove "${SCRIPT_DIR}/scripts/postremove.sh" \ - . 
\ No newline at end of file diff --git a/synapse/templates/app/ops/package/scripts/launch_synapse_app.sh b/synapse/templates/app/ops/package/scripts/launch_synapse_app.sh deleted file mode 100755 index e314f809..00000000 --- a/synapse/templates/app/ops/package/scripts/launch_synapse_app.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash -# Launches app -# Will need to be run as root -# We might be able to get away with this as a normal user using CAP_SYS_NICE and CAP_SYS_RESOURCE, but we'll need to test -# TODO: this should be configurable -SYNAPSE_APP_EXE="{{APP_NAME}}" -MANIFEST_FILE="/opt/scifi/config/app_manifest.json" - -# Set max UDP write buffer size to 4MB -sysctl -w net.core.wmem_max=4194304 -sysctl -w net.core.wmem_default=4194304 - -# Set up LD_LIBRARY_PATH to prefer our local libraries and user libraries -export LD_LIBRARY_PATH=/opt/scifi/usr-libs:/opt/scifi/lib:$LD_LIBRARY_PATH - -# Launch the server -export SCIFI_ROOT=${SCIFI_ROOT:-/opt/scifi} -PATH_TO_EXE="$SCIFI_ROOT/bin/${SYNAPSE_APP_EXE}" -if [ ! 
-x "${PATH_TO_EXE}" ]; then - echo "Server binary not found or not executable" >&2 - exit 1 -fi - -exec "${PATH_TO_EXE}" "$@" \ No newline at end of file diff --git a/synapse/templates/app/ops/package/scripts/postinstall.sh b/synapse/templates/app/ops/package/scripts/postinstall.sh deleted file mode 100755 index ef39fd3e..00000000 --- a/synapse/templates/app/ops/package/scripts/postinstall.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -set -e - -# App name variable that will be replaced -SYNAPSE_APP_EXE="{{APP_NAME}}" - -# Set up and reload udev rules -udevadm control --reload-rules -udevadm trigger - -# Set permissions for the executable -chown root:root /opt/scifi/bin/"${SYNAPSE_APP_EXE}" -chmod 755 /opt/scifi/bin/"${SYNAPSE_APP_EXE}" - -# Reload and start the service -systemctl daemon-reload diff --git a/synapse/templates/app/ops/package/scripts/postremove.sh b/synapse/templates/app/ops/package/scripts/postremove.sh deleted file mode 100755 index f1f5c52b..00000000 --- a/synapse/templates/app/ops/package/scripts/postremove.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash -set -e - -# Reload systemd -systemctl daemon-reload \ No newline at end of file diff --git a/synapse/templates/app/ops/package/scripts/preremove.sh b/synapse/templates/app/ops/package/scripts/preremove.sh deleted file mode 100755 index 45986afc..00000000 --- a/synapse/templates/app/ops/package/scripts/preremove.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash -set -e - -# App name variable that will be replaced -SYNAPSE_APP_EXE="{{APP_NAME}}" - -# Stop and disable the service -systemctl stop "${SYNAPSE_APP_EXE}" || true -systemctl disable "${SYNAPSE_APP_EXE}" || true \ No newline at end of file diff --git a/synapse/templates/app/ops/package/systemd/{{APP_NAME}}.service b/synapse/templates/app/ops/package/systemd/{{APP_NAME}}.service deleted file mode 100644 index c7860ab8..00000000 --- a/synapse/templates/app/ops/package/systemd/{{APP_NAME}}.service +++ /dev/null @@ -1,27 +0,0 @@ -[Unit] 
-Description=Synapse Application -After=network-online.target - -# We need to wait for the network to be online before starting the service -Wants=network-online.target - -# We need to wait for the udev service to be ready before starting the server -Requires=systemd-udevd.service -After=systemd-udevd.service - -# TODO: Make user run as non-root -[Service] -Type=exec -User=root -Restart=no - -ExecStart=/bin/bash -c "/opt/scifi/scripts/launch_synapse_app.sh" - -# Working directory -WorkingDirectory=/opt/scifi - -# Environment variables -Environment=SCIFI_ROOT=/opt/scifi - -[Install] -WantedBy=multi-user.target \ No newline at end of file diff --git a/synapse/templates/app/ops/prepare_and_package.sh b/synapse/templates/app/ops/prepare_and_package.sh deleted file mode 100755 index ccbbdd40..00000000 --- a/synapse/templates/app/ops/prepare_and_package.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env bash -set -e - -# --------------------------------------------------------------------------- -# prepare_and_package.sh -# --------------------------------------------------------------------------- -# This script copies the shared *ops* template directory to a temporary working -# location, replaces placeholder tokens with the supplied application name, and -# then runs the main `package.sh` script so that a Debian package is produced. -# -# Usage (inside the Docker packaging container): -# /synapse_ops/prepare_and_package.sh -# --------------------------------------------------------------------------- - -if [[ $# -ne 1 ]]; then - echo "Usage: $0 " >&2 - exit 1 -fi - -APP_NAME="$1" - -# The template directory is mounted read-only by the CLI. Work on a copy so we -# can make in-place edits while preserving the pristine original. -TEMPLATE_DIR="/synapse_ops" -TEMP_DIR="/tmp/synapse_package_ops" -rm -rf "$TEMP_DIR" -cp -r "$TEMPLATE_DIR" "$TEMP_DIR" - -# Replace all occurrences of the placeholder token in every file of the copied -# template directory. 
-find "$TEMP_DIR" -type f -exec sed -i "s/{{APP_NAME}}/${APP_NAME}/g" {} + - -# The systemd service file itself has the placeholder in its *filename* so we -# need to rename it as well. -SERVICE_TEMPLATE="$TEMP_DIR/package/systemd/{{APP_NAME}}.service" -if [[ -f "$SERVICE_TEMPLATE" ]]; then - mv "$SERVICE_TEMPLATE" "$TEMP_DIR/package/systemd/${APP_NAME}.service" -fi - -# The main packaging script expects SOURCE_DIR to point at the application -# workspace mounted from the host. Ensure the variable is set correctly and -# that the script is executable. -chmod +x "$TEMP_DIR/package/package.sh" -sed -i 's|SOURCE_DIR=.*|SOURCE_DIR="/home/workspace"|' "$TEMP_DIR/package/package.sh" - -# Kick off the actual `.deb` creation process. -cd /home/workspace -bash "$TEMP_DIR/package/package.sh" \ No newline at end of file From 7112a88ea6cf1339e03606be0860f2348f6b9334 Mon Sep 17 00:00:00 2001 From: Emma Zhou Date: Fri, 9 May 2025 19:16:40 -0700 Subject: [PATCH 21/25] revert manifest --- MANIFEST.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MANIFEST.in b/MANIFEST.in index e375b7fd..4c3fecf0 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1 +1 @@ -recursive-include synapse *.py *.pyx *.pyd *.c *.h *.sh *.service +recursive-include synapse *.py *.pyx *.pyd *.c *.h From 3ecc8fe674f3899df8b54ae2a56a91e2b6895f35 Mon Sep 17 00:00:00 2001 From: Gilbert Montague Date: Fri, 9 May 2025 19:26:45 -0700 Subject: [PATCH 22/25] Fixed build on linux --- synapse/cli/deploy.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/synapse/cli/deploy.py b/synapse/cli/deploy.py index 0ddebb54..dad83f63 100644 --- a/synapse/cli/deploy.py +++ b/synapse/cli/deploy.py @@ -150,8 +150,6 @@ def build_deb_package(app_dir: str, app_name: str, version: str = "0.1.0") -> bo # ------------------------------------------------------------------ # 4. 
Build the .deb with FPM # ------------------------------------------------------------------ - arch = detect_arch() - fpm_cmd = [ "fpm", "-s", @@ -173,7 +171,7 @@ def build_deb_package(app_dir: str, app_name: str, version: str = "0.1.0") -> bo "--description", "Synapse Application", "--architecture", - arch, + "arm64", ] # Attach lifecycle scripts (referenced relative to /pkg inside container) @@ -224,7 +222,9 @@ def build_deb_package(app_dir: str, app_name: str, version: str = "0.1.0") -> bo subprocess.run(docker_fpm_cmd, check=True) # Verify that a .deb was produced - deb_files = [f for f in os.listdir(app_dir) if f.endswith(".deb")] + deb_files = [ + f for f in os.listdir(app_dir) if f.endswith(".deb") and "arm64" in f + ] if not deb_files: console.print( f"[bold red]Error:[/bold red] FPM completed but no .deb found in {app_dir}." From e5eae07d5f5f8ac86210c4432e5ca76c1bac12db Mon Sep 17 00:00:00 2001 From: Emma Zhou Date: Fri, 9 May 2025 19:30:09 -0700 Subject: [PATCH 23/25] remove some cruft --- synapse/cli/deploy.py | 73 +++++++++++++------------------------------ 1 file changed, 21 insertions(+), 52 deletions(-) diff --git a/synapse/cli/deploy.py b/synapse/cli/deploy.py index dad83f63..c8470ab1 100644 --- a/synapse/cli/deploy.py +++ b/synapse/cli/deploy.py @@ -58,21 +58,11 @@ def build_deb_package(app_dir: str, app_name: str, version: str = "0.1.0") -> bo try: staging_dir = tempfile.mkdtemp(prefix="synapse-package-") + binary_path = os.path.join(app_dir, "build-aarch64", app_name) - # ------------------------------------------------------------------ - # 1. 
Locate the compiled binary and copy it to /opt/scifi/bin - # ------------------------------------------------------------------ - possible_bins = [ - os.path.join(app_dir, "build-aarch64", app_name), - os.path.join(app_dir, "build", app_name), - os.path.join(app_dir, "build-arm64", app_name), - ] - - binary_path = next((p for p in possible_bins if os.path.exists(p)), None) - - if binary_path is None: + if not os.path.exists(binary_path): console.print( - f"[bold red]Error:[/bold red] Compiled binary '{app_name}' not found." + f"[bold red]Error:[/bold red] Compiled binary '{app_name}' not found at {binary_path}." ) return False @@ -80,10 +70,6 @@ def build_deb_package(app_dir: str, app_name: str, version: str = "0.1.0") -> bo os.makedirs(bin_dst_dir, exist_ok=True) shutil.copy2(binary_path, os.path.join(bin_dst_dir, app_name)) - # ------------------------------------------------------------------ - # 2. Generate systemd service & lifecycle scripts on the fly - # ------------------------------------------------------------------ - # Generate systemd unit svc_content = f"""[Unit] Description=Synapse Application @@ -136,9 +122,6 @@ def build_deb_package(app_dir: str, app_name: str, version: str = "0.1.0") -> bo os.chmod(postremove_path, 0o755) lifecycle_scripts_tmp.append(postremove_path) - # ------------------------------------------------------------------ - # 3. Copy user-space Synapse SDK shared libs - # ------------------------------------------------------------------ lib_dst_dir = os.path.join(staging_dir, "opt", "scifi", "lib") os.makedirs(lib_dst_dir, exist_ok=True) for lib in glob.glob("/usr/lib/libsynapse*.so*"): @@ -147,9 +130,6 @@ def build_deb_package(app_dir: str, app_name: str, version: str = "0.1.0") -> bo except PermissionError: console.print(f"[yellow]Skipping lib copy (perm): {lib}[/yellow]") - # ------------------------------------------------------------------ - # 4. 
Build the .deb with FPM - # ------------------------------------------------------------------ fpm_cmd = [ "fpm", "-s", @@ -188,10 +168,6 @@ def build_deb_package(app_dir: str, app_name: str, version: str = "0.1.0") -> bo fpm_cmd.append(".") - # ------------------------------------------------------------------ - # 5. Invoke FPM in a Docker container (consistent across hosts) - # ------------------------------------------------------------------ - fpm_image = "cdrx/fpm-ubuntu:latest" console.print(f"[yellow]Running FPM (Docker image: {fpm_image}) ...[/yellow]") @@ -565,17 +541,11 @@ def build_app(app_dir, app_name): console.print(f"[yellow]Building application: {app_name}...[/yellow]") # Check if binary already exists - binary_paths = [ - os.path.join(app_dir, "build-aarch64", app_name), - os.path.join(app_dir, "build", app_name), - os.path.join(app_dir, "build-arm64", app_name), - os.path.join(app_dir, "out", app_name), - ] + binary_path = os.path.join(app_dir, "build-aarch64", app_name) - for path in binary_paths: - if os.path.exists(path): - console.print(f"[green]Binary already exists at: {path}[/green]") - return True + if os.path.exists(binary_path): + console.print(f"[green]Binary already exists at: {binary_path}[/green]") + return True # Binary doesn't exist, build it console.print("[yellow]Binary not found, attempting to build...[/yellow]") @@ -645,14 +615,14 @@ def build_app(app_dir, app_name): # Fall back to manual configuration echo 'No CMake presets found, using manual configuration...' && export VCPKG_DEFAULT_TRIPLET=arm64-linux-dynamic-release && - cmake -B build -S . \ + cmake -B build-aarch64 -S . 
\ -DCMAKE_TOOLCHAIN_FILE=${VCPKG_ROOT}/scripts/buildsystems/vcpkg.cmake \ -DVCPKG_TARGET_TRIPLET=arm64-linux-dynamic-release \ -DVCPKG_INSTALLED_DIR=${VCPKG_ROOT}/vcpkg_installed \ -DBUILD_SHARED_LIBS=ON \ -DCMAKE_BUILD_TYPE=Release \ -DBUILD_FOR_ARM64=ON && - cmake --build build -j$(nproc); + cmake --build build-aarch64 -j$(nproc); fi""", ] @@ -662,33 +632,32 @@ def build_app(app_dir, app_name): subprocess.run(build_cmd, check=True, cwd=app_dir) # Check if build succeeded - for path in binary_paths: - if os.path.exists(path): - console.print(f"[green]Successfully built binary at: {path}[/green]") - return True + if os.path.exists(binary_path): + console.print(f"[green]Successfully built binary at: {binary_path}[/green]") + return True # If we get here, the build might have succeeded but we can't find the binary console.print( - "[bold yellow]Warning: Build completed but binary not found in expected locations.[/bold yellow]" + f"[bold yellow]Warning: Build completed but binary not found at expected location: {binary_path}[/bold yellow]" ) # Try to find it manually - binary_path = subprocess.run( + binary_found = subprocess.run( ["find", app_dir, "-type", "f", "-name", app_name, "-not", "-path", "*/.*"], capture_output=True, text=True, check=False, ).stdout.strip() - if binary_path: - binary_path = binary_path.split("\n")[0] # Take the first match if multiple - console.print(f"[green]Found binary at: {binary_path}[/green]") + if binary_found: + binary_found_path = binary_found.split("\n")[0] # Take the first match if multiple + console.print(f"[green]Found binary at: {binary_found_path}[/green]") - # Try to copy it to one of the standard locations - build_dir = os.path.join(app_dir, "build") + # Try to copy it to the standard location + build_dir = os.path.dirname(binary_path) os.makedirs(build_dir, exist_ok=True) - shutil.copy(binary_path, os.path.join(build_dir, app_name)) + shutil.copy(binary_found_path, binary_path) console.print( - f"[green]Copied binary to: 
{os.path.join(build_dir, app_name)}[/green]"
+            f"[green]Copied binary to: {binary_path}[/green]"
         )
         return True

From 5e910ea6fb12e45ad99787821008f59db997fcd2 Mon Sep 17 00:00:00 2001
From: Emma Zhou
Date: Fri, 9 May 2025 19:36:18 -0700
Subject: [PATCH 24/25] recursive copy for sdk

---
 synapse/cli/deploy.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/synapse/cli/deploy.py b/synapse/cli/deploy.py
index c8470ab1..7577848c 100644
--- a/synapse/cli/deploy.py
+++ b/synapse/cli/deploy.py
@@ -124,7 +124,8 @@ def build_deb_package(app_dir: str, app_name: str, version: str = "0.1.0") -> bo
     lib_dst_dir = os.path.join(staging_dir, "opt", "scifi", "lib")
     os.makedirs(lib_dst_dir, exist_ok=True)
-    for lib in glob.glob("/usr/lib/libsynapse*.so*"):
+    # Recursively copy all libsynapse shared libraries from /usr/lib (mirrors the old package.sh lookup)
+    for lib in glob.glob("/usr/lib/**/libsynapse*.so*", recursive=True):
         try:
             shutil.copy2(lib, lib_dst_dir)
         except PermissionError:
             console.print(f"[yellow]Skipping lib copy (perm): {lib}[/yellow]")

From 438b3303f313125c45719d9556127b470154049e Mon Sep 17 00:00:00 2001
From: Emma Zhou
Date: Fri, 9 May 2025 19:44:56 -0700
Subject: [PATCH 25/25] fix sdk copy

---
 synapse/cli/deploy.py | 45 +++++++++++++++++++++++++++++++++++++------
 1 file changed, 39 insertions(+), 6 deletions(-)

diff --git a/synapse/cli/deploy.py b/synapse/cli/deploy.py
index 7577848c..10711983 100644
--- a/synapse/cli/deploy.py
+++ b/synapse/cli/deploy.py
@@ -124,12 +124,45 @@ def build_deb_package(app_dir: str, app_name: str, version: str = "0.1.0") -> bo
     lib_dst_dir = os.path.join(staging_dir, "opt", "scifi", "lib")
     os.makedirs(lib_dst_dir, exist_ok=True)
-    # Recursively copy all libsynapse shared libraries from /usr/lib (mirrors the old package.sh lookup)
-    for lib in glob.glob("/usr/lib/**/libsynapse*.so*", recursive=True):
-        try:
-            shutil.copy2(lib, lib_dst_dir)
-        except PermissionError:
-            console.print(f"[yellow]Skipping lib copy (perm): {lib}[/yellow]")
+
+    try:
+        arch_suffix = detect_arch()  # "arm64" or "amd64"
+        image_tag = f"{app_name}:latest-{arch_suffix}"
+ platform_opt = "linux/arm64" if arch_suffix == "arm64" else "linux/amd64" + + console.print( + f"[yellow]Extracting SDK libraries from Docker image [bold]{image_tag}[/bold]...[/yellow]" + ) + + docker_cmd = [ + "docker", + "run", + "--rm", + "--platform", + platform_opt, + "-v", + f"{lib_dst_dir}:/out", + image_tag, + "/bin/bash", + "-c", + "find /usr/lib -name 'libsynapse*.so*' -exec cp -av {} /out/ \\;", + ] + + subprocess.run(docker_cmd, check=True) + + except subprocess.CalledProcessError as e: + console.print( + f"[bold red]Error:[/bold red] Failed to copy SDK libraries from Docker image: {e}" + ) + console.print( + "[yellow]Falling back to host /usr/lib lookup for libsynapse*.so* (results may be incomplete).[/yellow]" + ) + + for lib in glob.glob("/usr/lib/**/libsynapse*.so*", recursive=True): + try: + shutil.copy2(lib, lib_dst_dir) + except PermissionError: + console.print(f"[yellow]Skipping lib copy (perm): {lib}[/yellow]") fpm_cmd = [ "fpm",