diff --git a/src/cmd/cli/command/commands.go b/src/cmd/cli/command/commands.go index 0cb625658..efe1f7545 100644 --- a/src/cmd/cli/command/commands.go +++ b/src/cmd/cli/command/commands.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "os" "regexp" "strings" @@ -49,7 +50,7 @@ func Execute(ctx context.Context) error { if err := RootCmd.ExecuteContext(ctx); err != nil { if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) { - term.Error("Error:", client.PrettyError(err)) + slog.ErrorContext(ctx, fmt.Sprint("Error:", client.PrettyError(err))) track.Evt("CLI Error", P("err", err)) } @@ -101,15 +102,15 @@ func Execute(ctx context.Context) error { } if global.HasTty && term.HadWarnings() { - term.Println("For help with warnings, check our FAQ at https://s.defang.io/warnings") + fmt.Println("For help with warnings, check our FAQ at https://s.defang.io/warnings") } if global.HasTty && !global.HideUpdate && pkg.RandomIndex(10) == 0 { if latest, err := github.GetLatestReleaseTag(ctx); err == nil && isNewer(GetCurrentVersion(), latest) { - term.Debug("Latest Version:", latest, "Current Version:", GetCurrentVersion()) - term.Println("A newer version of the CLI is available at https://github.com/DefangLabs/defang/releases/latest") + slog.Debug("Newer version", "github", latest, "current", GetCurrentVersion()) + fmt.Println("A newer version of the CLI is available at https://github.com/DefangLabs/defang/releases/latest") if pkg.RandomIndex(10) == 0 && !pkg.GetenvBool("DEFANG_HIDE_HINTS") { - term.Println("To silence these notices, do: export DEFANG_HIDE_UPDATE=1") + fmt.Println("To silence these notices, do: export DEFANG_HIDE_UPDATE=1") } } } @@ -409,16 +410,16 @@ var RootCmd = &cobra.Command{ if connect.CodeOf(err) != connect.CodeUnauthenticated { return err } - term.Debug("Using existing token failed; continuing to allow login/ToS flow:", err) + slog.Debug(fmt.Sprint("Using existing token failed; continuing to allow login/ToS 
flow:", err)) } track.Tracker = global.Client // update tracker with the real client if v, err := global.Client.GetVersions(ctx); err == nil { version := cmd.Root().Version // HACK to avoid circular dependency with RootCmd - term.Debug("Fabric:", v.Fabric, "CLI:", version, "CLI-Min:", v.CliMin) + slog.Debug(fmt.Sprint("Fabric:", v.Fabric, "CLI:", version, "CLI-Min:", v.CliMin)) if global.HasTty && isNewer(version, v.CliMin) && !isUpgradeCommand(cmd) { - term.Warn("Your CLI version is outdated. Please upgrade to the latest version by running:\n\n defang upgrade\n") + slog.WarnContext(ctx, "Your CLI version is outdated. Please upgrade to the latest version by running:\n\n defang upgrade\n") global.HideUpdate = true // hide the upgrade hint at the end } } diff --git a/src/cmd/cli/command/compose.go b/src/cmd/cli/command/compose.go index 92a82f34e..1c177c053 100644 --- a/src/cmd/cli/command/compose.go +++ b/src/cmd/cli/command/compose.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "io" + "log/slog" "slices" "strings" "time" @@ -35,9 +36,9 @@ const SERVICE_PORTAL_URL = "https://" + DEFANG_PORTAL_HOST + "/service" func printPlaygroundPortalServiceURLs(serviceInfos []*defangv1.ServiceInfo) { // We can only show services deployed to the prod1 defang SaaS environment. 
if global.Stack.Provider == client.ProviderDefang && global.FabricAddr == client.DefaultFabricAddr { - term.Info("Monitor your services' status in the defang portal") + slog.Info("Monitor your services' status in the defang portal") for _, serviceInfo := range serviceInfos { - term.Println(" -", SERVICE_PORTAL_URL+"/"+serviceInfo.Service.Name) + fmt.Println(" -", SERVICE_PORTAL_URL+"/"+serviceInfo.Service.Name) } } } @@ -94,9 +95,9 @@ func makeComposeUpCmd() *cobra.Command { Type: defangv1.DeploymentType_DEPLOYMENT_TYPE_ACTIVE, Stack: session.Stack.Name, }); err != nil { - term.Debugf("ListDeployments failed: %v", err) + slog.Debug("ListDeployments failed", "err", err) } else if accountInfo, err := session.Provider.AccountInfo(ctx); err != nil { - term.Debugf("AccountInfo failed: %v", err) + slog.Debug("AccountInfo failed", "err", err) } else if len(resp.Deployments) > 0 { workingDir, _ := session.Loader.ProjectWorkingDir(ctx) confirmed, err := confirmDeployment(workingDir, resp.Deployments, accountInfo, session.Provider.GetStackName()) @@ -115,7 +116,7 @@ func makeComposeUpCmd() *cobra.Command { Mode: session.Stack.Mode, }) if err != nil { - term.Debug("Failed to create stack:", err) + slog.Debug(fmt.Sprint("Failed to create stack:", err)) } } @@ -127,7 +128,7 @@ func makeComposeUpCmd() *cobra.Command { } } if len(managedServices) > 0 { - term.Warnf("Defang cannot monitor status of the following managed service(s): %v.\n To check if the managed service is up, check the status of the service which depends on it.", managedServices) + slog.WarnContext(ctx, fmt.Sprintf("Defang cannot monitor status of the following managed service(s): %v.\n To check if the managed service is up, check the status of the service which depends on it.", managedServices)) } deploy, project, err := cli.ComposeUp(ctx, global.Client, session.Provider, session.Stack, cli.ComposeUpParams{ @@ -151,7 +152,7 @@ func makeComposeUpCmd() *cobra.Command { 
printPlaygroundPortalServiceURLs(deploy.Services) if detach { - term.Info("Detached.") + slog.InfoContext(ctx, "Detached.") return nil } @@ -160,7 +161,7 @@ func makeComposeUpCmd() *cobra.Command { if deploy.Etag != "" { tailSource = "deployment ID " + deploy.Etag } - term.Info("Tailing logs for", tailSource, "; press Ctrl+C to detach:") + slog.InfoContext(ctx, fmt.Sprint("Tailing logs for", tailSource, "; press Ctrl+C to detach:")) tailOptions := newTailOptionsForDeploy(session.Stack.Name, deploy.Etag, since, global.Verbose) serviceStates, err := cli.TailAndMonitor(ctx, project, session.Provider, time.Duration(waitTimeout)*time.Second, tailOptions) @@ -168,7 +169,7 @@ func makeComposeUpCmd() *cobra.Command { deploymentErr := err debugger, err := debug.NewDebugger(ctx, global.FabricAddr, session.Stack) if err != nil { - term.Warn("Failed to initialize debugger:", err) + slog.WarnContext(ctx, fmt.Sprint("Failed to initialize debugger:", err)) return deploymentErr } handleTailAndMonitorErr(ctx, deploymentErr, debugger, debug.DebugConfig{ @@ -197,7 +198,7 @@ func makeComposeUpCmd() *cobra.Command { return err } - term.Info("Done.") + slog.InfoContext(ctx, "Done.") flushWarnings() return nil }, @@ -246,7 +247,7 @@ func confirmDeployment(targetDirectory string, existingDeployments []*defangv1.D Mode: global.Stack.Mode, }) if err != nil { - term.Debugf("Failed to create stack %v", err) + slog.Debug("Failed to create stack", "err", err) } else { stacks.PrintCreateMessage(stackName) } @@ -255,7 +256,7 @@ func confirmDeployment(targetDirectory string, existingDeployments []*defangv1.D } func printExistingDeployments(existingDeployments []*defangv1.Deployment) { - term.Info("This project was previously deployed to the following locations:") + fmt.Println("This project was previously deployed to the following locations:") deploymentStrings := make([]string, 0, len(existingDeployments)) for _, dep := range existingDeployments { var providerId client.ProviderID @@ -283,7 +284,7 
@@ func confirmDeploymentToNewLocation() (bool, error) { func promptToCreateStack(ctx context.Context, targetDirectory string, params stacks.Parameters) error { if global.NonInteractive { - term.Info("Consider creating a stack to manage your deployments.") + fmt.Println("Consider creating a stack to manage your deployments.") printDefangHint("To create a stack, do:", "stack new --name="+params.Name) return nil } @@ -310,7 +311,7 @@ func handleComposeUpErr(ctx context.Context, debugger *debug.Debugger, project * } if connect.CodeOf(originalErr) == connect.CodeResourceExhausted && strings.Contains(originalErr.Error(), "maximum number of projects") { - term.Error("Error:", client.PrettyError(originalErr)) + slog.ErrorContext(ctx, fmt.Sprint("Error:", client.PrettyError(originalErr))) err := handleTooManyProjectsError(ctx, provider, originalErr) if err != nil { return originalErr @@ -322,7 +323,7 @@ func handleComposeUpErr(ctx context.Context, debugger *debug.Debugger, project * return originalErr } - term.Error("Error:", client.PrettyError(originalErr)) + slog.ErrorContext(ctx, fmt.Sprint("Error:", client.PrettyError(originalErr))) return debugger.DebugDeploymentError(ctx, debug.DebugConfig{ Project: project, }, originalErr) @@ -331,7 +332,7 @@ func handleComposeUpErr(ctx context.Context, debugger *debug.Debugger, project * func handleTooManyProjectsError(ctx context.Context, provider client.Provider, originalErr error) error { projectName, err := provider.RemoteProjectName(ctx) if err != nil { - term.Warn("failed to get remote project name:", err) + slog.WarnContext(ctx, fmt.Sprint("failed to get remote project name:", err)) return originalErr } @@ -343,7 +344,7 @@ func handleTooManyProjectsError(ctx context.Context, provider client.Provider, o _, err = cli.InteractiveComposeDown(ctx, projectName, global.Client, provider) if err != nil { - term.Warn("ComposeDown failed:", err) + slog.WarnContext(ctx, fmt.Sprint("ComposeDown failed:", err)) printDefangHint("To 
deactivate a project, do:", "compose down --project-name "+projectName) return originalErr } else { @@ -358,7 +359,7 @@ func handleTailAndMonitorErr(ctx context.Context, err error, debugger *debug.Deb var errDeploymentFailed client.ErrDeploymentFailed if errors.As(err, &errDeploymentFailed) { // Tail got canceled because of deployment failure: prompt to show the debugger - term.Warn(errDeploymentFailed) + slog.WarnContext(ctx, fmt.Sprintf("%v", errDeploymentFailed)) if errDeploymentFailed.Service != "" { debugConfig.FailedServices = []string{errDeploymentFailed.Service} } @@ -437,21 +438,21 @@ func makeComposeDownCmd() *cobra.Command { if err != nil { if connect.CodeOf(err) == connect.CodeNotFound { // Show a warning (not an error) if the service was not found - term.Warn(client.PrettyError(err)) + slog.Warn(fmt.Sprintf("%v", client.PrettyError(err))) return nil } return err } - term.Info("Deleted services, deployment ID", deployment) + slog.Info(fmt.Sprint("Deleted services, deployment ID", deployment)) listConfigs, err := session.Provider.ListConfig(cmd.Context(), &defangv1.ListConfigsRequest{Project: projectName}) if err == nil { if len(listConfigs.Names) > 0 { - term.Warn("Stored project configs are not deleted.") + slog.Warn("Stored project configs are not deleted.") } } else { - term.Debugf("ListConfigs failed: %v", err) + slog.Debug("ListConfigs failed", "err", err) } if detach { @@ -468,12 +469,12 @@ func makeComposeDownCmd() *cobra.Command { // different than `up`, which will wait for the deployment to finish, but we don't have an // ECS event subscription for `down` so we can't wait for the deployment to finish. // Instead, we'll just show a warning and detach. - term.Warn("Unable to tail logs. Detaching.") + slog.Warn("Unable to tail logs. 
Detaching.") return nil } return err } - term.Info("Done.") + slog.Info("Done.") if len(listConfigs.Names) > 0 { printDefangHint("To delete stored project configs, run:", "config rm --project-name="+projectName+" "+strings.Join(listConfigs.Names, " ")) } @@ -520,7 +521,7 @@ func makeComposeConfigCmd() *cobra.Command { CheckAccountInfo: false, }) if err != nil { - term.Warn("unable to load stack:", err, "- some information may not be up-to-date") + slog.WarnContext(ctx, fmt.Sprint("unable to load stack:", err, "- some information may not be up-to-date")) sessionx = &session.Session{ Loader: configureLoaderForCommand(cmd), Provider: client.NewPlaygroundProvider(global.Client, stacks.DefaultBeta), @@ -530,7 +531,7 @@ func makeComposeConfigCmd() *cobra.Command { _, err = sessionx.Provider.AccountInfo(ctx) if err != nil { - term.Warn("unable to connect to cloud provider:", err, "- some information may not be up-to-date") + slog.WarnContext(ctx, fmt.Sprint("unable to connect to cloud provider:", err, "- some information may not be up-to-date")) } project, loadErr := sessionx.Loader.LoadProject(ctx) @@ -580,7 +581,7 @@ func makeComposePsCmd() *cobra.Command { return err } - term.Warn(err) + slog.Warn(fmt.Sprintf("%v", err)) printDefangHint("To start a new project, do:", "new") return nil } @@ -676,7 +677,7 @@ func handleLogsCmd(cmd *cobra.Command, args []string) error { if pkg.IsValidTime(untilTs) { rangeStr += " until " + untilTs.Format(time.RFC3339Nano) } - term.Infof("Showing logs%s; press Ctrl+C to stop:", rangeStr) + fmt.Printf("Showing logs%s; press Ctrl+C to stop:\n", rangeStr) services := args if len(name) > 0 { diff --git a/src/cmd/cli/command/compose_test.go b/src/cmd/cli/command/compose_test.go index ccd324664..54bf59b85 100644 --- a/src/cmd/cli/command/compose_test.go +++ b/src/cmd/cli/command/compose_test.go @@ -3,11 +3,13 @@ package command import ( "bytes" "context" + "log/slog" "os" "testing" "connectrpc.com/connect" 
"github.com/DefangLabs/defang/src/pkg/cli/client" + "github.com/DefangLabs/defang/src/pkg/logs" "github.com/DefangLabs/defang/src/pkg/term" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" ) @@ -25,12 +27,23 @@ func TestInitializeTailCmd(t *testing.T) { func TestPrintPlaygroundPortalServiceURLs(t *testing.T) { defaultTerm := term.DefaultTerm + oldStdout := os.Stdout t.Cleanup(func() { term.DefaultTerm = defaultTerm + os.Stdout = oldStdout }) - var stdout, stderr bytes.Buffer - term.DefaultTerm = term.NewTerm(os.Stdin, &stdout, &stderr) + // Capture slog output via term logger + var termBuf, stderr bytes.Buffer + term.DefaultTerm = term.NewTerm(os.Stdin, &termBuf, &stderr) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) + + // Capture fmt.Println output via os.Pipe + r, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + os.Stdout = w global.Stack.Provider = client.ProviderDefang global.FabricAddr = client.DefaultFabricAddr @@ -38,11 +51,18 @@ func TestPrintPlaygroundPortalServiceURLs(t *testing.T) { { Service: &defangv1.Service{Name: "service1"}, }}) - const want = ` * Monitor your services' status in the defang portal - - https://portal.defang.io/service/service1 -` - if got := stdout.String(); got != want { - t.Errorf("got %q, want %q", got, want) + + w.Close() + var stdoutBuf bytes.Buffer + stdoutBuf.ReadFrom(r) + + const wantSlog = " * Monitor your services' status in the defang portal\n" + if got := termBuf.String(); got != wantSlog { + t.Errorf("slog output: got %q, want %q", got, wantSlog) + } + const wantStdout = " - https://portal.defang.io/service/service1\n" + if got := stdoutBuf.String(); got != wantStdout { + t.Errorf("stdout output: got %q, want %q", got, wantStdout) } } diff --git a/src/cmd/cli/command/config.go b/src/cmd/cli/command/config.go index b779112e4..65c31d7f9 100644 --- a/src/cmd/cli/command/config.go +++ b/src/cmd/cli/command/config.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "io" + "log/slog" "os" "strings" 
@@ -164,13 +165,13 @@ var configSetCmd = &cobra.Command{ if err != nil { errs = append(errs, err) } else if ifNotSet && !didSet { - term.Info("Config", name, "is already set; skipping due to --if-not-set flag") + slog.Info(fmt.Sprintf("Config %s is already set; skipping due to --if-not-set flag", name)) } else { - term.Info("Updated value for", name) + slog.Info("Updated value for " + name) } } - term.Infof("Successfully set %d config value(s)", len(envMap)-len(errs)) + slog.Info(fmt.Sprintf("Successfully set %d config value(s)", len(envMap)-len(errs))) printDefangHint("To update the deployed values, do:", "compose up") return errors.Join(errs...) @@ -197,12 +198,12 @@ var configDeleteCmd = &cobra.Command{ if err := cli.ConfigDelete(cmd.Context(), projectName, session.Provider, names...); err != nil { // Show a warning (not an error) if the config was not found if connect.CodeOf(err) == connect.CodeNotFound { - term.Warn(client.PrettyError(err)) + slog.Warn(fmt.Sprintf("%v", client.PrettyError(err))) return nil } return err } - term.Info("Deleted", names) + slog.Info(fmt.Sprint("Deleted", names)) printDefangHint("To list the configs (but not their values), do:", "config ls") return nil diff --git a/src/cmd/cli/command/estimate.go b/src/cmd/cli/command/estimate.go index c81911c30..192532d05 100644 --- a/src/cmd/cli/command/estimate.go +++ b/src/cmd/cli/command/estimate.go @@ -2,6 +2,7 @@ package command import ( "fmt" + "log/slog" "github.com/AlecAivazis/survey/v2" "github.com/DefangLabs/defang/src/pkg" @@ -54,7 +55,7 @@ func makeEstimateCmd() *cobra.Command { if err != nil { return fmt.Errorf("failed to run estimate: %w", err) } - term.Debugf("Estimate: %+v", estimate) + slog.Debug("Estimate", "estimate", estimate) cli.PrintEstimate(global.Stack.Mode, estimate, term.DefaultTerm) diff --git a/src/cmd/cli/command/generate.go b/src/cmd/cli/command/generate.go index aabef1818..7579741a1 100644 --- a/src/cmd/cli/command/generate.go +++ b/src/cmd/cli/command/generate.go 
@@ -3,6 +3,8 @@ package command import ( "context" "errors" + "fmt" + "log/slog" "os/exec" "path/filepath" @@ -11,7 +13,6 @@ import ( "github.com/DefangLabs/defang/src/pkg/cli/compose" "github.com/DefangLabs/defang/src/pkg/setup" "github.com/DefangLabs/defang/src/pkg/surveyor" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/spf13/cobra" ) @@ -51,12 +52,12 @@ var generateCmd = &cobra.Command{ } func afterGenerate(ctx context.Context, result setup.SetupResult) { - term.Info("Code generated successfully in folder", result.Folder) + slog.InfoContext(ctx, fmt.Sprint("Code generated successfully in folder", result.Folder)) editor := pkg.Getenv("DEFANG_EDITOR", "code") // TODO: should we use EDITOR env var instead? But won't handle terminal editors like vim cmdd := exec.Command(editor, result.Folder) err := cmdd.Start() if err != nil { - term.Debugf("unable to launch editor %q: %v", editor, err) + slog.Debug("unable to launch editor", "editor", editor, "err", err) } cd := "" @@ -68,7 +69,7 @@ func afterGenerate(ctx context.Context, result setup.SetupResult) { loader := compose.NewLoader(compose.WithPath(filepath.Join(result.Folder, "compose.yaml"))) project, err := loader.LoadProject(ctx) if err != nil { - term.Debugf("unable to load new project: %v", err) + slog.Debug("unable to load new project", "err", err) } var envInstructions []string diff --git a/src/cmd/cli/command/globals.go b/src/cmd/cli/command/globals.go index cf1b88ed6..f30b54b90 100644 --- a/src/cmd/cli/command/globals.go +++ b/src/cmd/cli/command/globals.go @@ -1,6 +1,7 @@ package command import ( + "log/slog" "os" "strconv" @@ -95,7 +96,7 @@ func NewGlobalConfig() *GlobalConfig { if fromEnv, ok := os.LookupEnv("DEFANG_COLOR"); ok { err := color.Set(fromEnv) if err != nil { - term.Debugf("invalid DEFANG_COLOR value: %v", err) + slog.Debug("invalid DEFANG_COLOR value", "err", err) } } @@ -103,7 +104,7 @@ func NewGlobalConfig() *GlobalConfig { if fromEnv, ok := os.LookupEnv("DEFANG_PROVIDER"); ok { 
err := provider.Set(fromEnv) if err != nil { - term.Debugf("invalid DEFANG_PROVIDER value: %v", err) + slog.Debug("invalid DEFANG_PROVIDER value", "err", err) } } @@ -111,7 +112,7 @@ func NewGlobalConfig() *GlobalConfig { if fromEnv, ok := os.LookupEnv("DEFANG_MODE"); ok { err := mode.Set(fromEnv) if err != nil { - term.Debugf("invalid DEFANG_MODE value: %v", err) + slog.Debug("invalid DEFANG_MODE value", "err", err) } } @@ -123,7 +124,7 @@ func NewGlobalConfig() *GlobalConfig { tenant = types.TenantNameOrID(fromEnv) } else if fromEnv, ok := os.LookupEnv("DEFANG_ORG"); ok { tenant = types.TenantNameOrID(fromEnv) - term.Warn("DEFANG_ORG is deprecated; use DEFANG_WORKSPACE instead") + slog.Warn("DEFANG_ORG is deprecated; use DEFANG_WORKSPACE instead") } return &GlobalConfig{ diff --git a/src/cmd/cli/command/login.go b/src/cmd/cli/command/login.go index 31da0c32b..f0fd80afa 100644 --- a/src/cmd/cli/command/login.go +++ b/src/cmd/cli/command/login.go @@ -1,8 +1,9 @@ package command import ( + "log/slog" + "github.com/DefangLabs/defang/src/pkg/login" - "github.com/DefangLabs/defang/src/pkg/term" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" "github.com/spf13/cobra" ) @@ -32,7 +33,7 @@ var loginCmd = &cobra.Command{ if err := global.Client.SetOptions(cmd.Context(), req); err != nil { return err } - term.Info("Options updated successfully") + slog.Info("Options updated successfully") } return nil }, diff --git a/src/cmd/cli/command/logout.go b/src/cmd/cli/command/logout.go index 3c3ca3c12..13caea704 100644 --- a/src/cmd/cli/command/logout.go +++ b/src/cmd/cli/command/logout.go @@ -1,8 +1,9 @@ package command import ( + "log/slog" + "github.com/DefangLabs/defang/src/pkg/cli" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/spf13/cobra" ) @@ -15,7 +16,7 @@ var logoutCmd = &cobra.Command{ if err := cli.Logout(cmd.Context(), global.Client, global.FabricAddr); err != nil { return err } - term.Info("Successfully logged out") + slog.Info("Successfully 
logged out") return nil }, } diff --git a/src/cmd/cli/command/mcp.go b/src/cmd/cli/command/mcp.go index a439491a5..4b982151a 100644 --- a/src/cmd/cli/command/mcp.go +++ b/src/cmd/cli/command/mcp.go @@ -2,11 +2,13 @@ package command import ( "fmt" + "log/slog" "os" "path/filepath" "github.com/DefangLabs/defang/src/pkg/agent/tools" "github.com/DefangLabs/defang/src/pkg/cli/client" + "github.com/DefangLabs/defang/src/pkg/logs" "github.com/DefangLabs/defang/src/pkg/mcp" "github.com/DefangLabs/defang/src/pkg/term" "github.com/mark3labs/mcp-go/server" @@ -32,22 +34,23 @@ var mcpServerCmd = &cobra.Command{ mcpClient, err := mcp.ParseMCPClient(ideClient) if err != nil { - term.Warnf("Unable to parse MCP client: %v", err) + slog.Warn(fmt.Sprintf("Unable to parse MCP client: %v", err)) mcpClient = mcp.MCPClientUnspecified } - term.Debug("Creating log file") + slog.Debug("Creating log file") logFile, err := os.OpenFile(filepath.Join(client.StateDir, "defang-mcp.log"), os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600) if err != nil { - term.Warnf("Failed to open log file: %v", err) + slog.Warn(fmt.Sprintf("Failed to open log file: %v", err)) } else { defer logFile.Close() term.DefaultTerm = term.NewTerm(os.Stdin, logFile, logFile) term.SetDebug(true) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) } // Create a new MCP server - term.Debug("Creating MCP server") + slog.Debug("Creating MCP server") s, err := mcp.NewDefangMCPServer(RootCmd.Version, mcpClient, tools.DefaultToolCLI{}, mcp.StackConfig{ FabricAddr: global.FabricAddr, Stack: &global.Stack, @@ -57,12 +60,12 @@ var mcpServerCmd = &cobra.Command{ } // Start the server - term.Println("Starting Defang MCP server") + fmt.Println("Starting Defang MCP server") if err := server.ServeStdio(s); err != nil { return err } - term.Println("Server shutdown") + fmt.Println("Server shutdown") return nil }, @@ -73,7 +76,7 @@ var mcpSetupCmd = &cobra.Command{ Short: "Setup MCP client for defang MCP server", Args: cobra.NoArgs, RunE: 
func(cmd *cobra.Command, args []string) error { - term.Debug("Setting up MCP client") + slog.Debug("Setting up MCP client") client, _ := cmd.Flags().GetString("client") if client != "" { @@ -87,18 +90,18 @@ var mcpSetupCmd = &cobra.Command{ client = string(mcp.MCPClientWindsurf) } - term.Debugf("Using MCP client flag: %q", client) + slog.Debug("Using MCP client flag", "client", client) if err := mcp.SetupClient(client); err != nil { return err } } else { - term.Debugf("Using MCP client picker: %q", client) + slog.Debug("Using MCP client picker", "client", client) clients, err := mcp.SelectMCPclients() if err != nil { return err } for _, client := range clients { - term.Debugf("Selected MCP client using picker: %q", client) + slog.Debug("Selected MCP client using picker", "client", client) if err := mcp.SetupClient(client); err != nil { return err diff --git a/src/cmd/cli/command/session.go b/src/cmd/cli/command/session.go index 9766085b6..5aef33ef1 100644 --- a/src/cmd/cli/command/session.go +++ b/src/cmd/cli/command/session.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "os" "path/filepath" "strings" @@ -41,7 +42,7 @@ func newCommandSessionWithOpts(cmd *cobra.Command, opts commandSessionOpts) (*se if !errors.Is(err, types.ErrComposeFileNotFound) { return nil, err } - term.Debugf("Could not create stack manager: %v", err) + slog.Debug("Could not create stack manager", "err", err) } sessionLoader := session.NewSessionLoader(global.Client, sm, options) session, err := sessionLoader.LoadSession(ctx) @@ -69,15 +70,15 @@ func newSessionLoaderOptionsForCommand(cmd *cobra.Command) session.SessionLoader var maybeProvider client.ProviderID if maybeProvider.Set(projectName) == nil && !cmd.Flag("provider").Changed { // using -p with a provider name instead of -P - term.Warnf("Project name %q looks like a provider name; did you mean to use -P=%s instead of -p?", projectName, projectName) + slog.Warn(fmt.Sprintf("Project name %q looks like a provider name; 
did you mean to use -P=%s instead of -p?", projectName, projectName)) doubleCheckProjectName(projectName) } else if strings.HasPrefix(projectName, "roject-name") { // -project-name= instead of --project-name - term.Warn("Did you mean to use --project-name instead of -project-name?") + slog.Warn("Did you mean to use --project-name instead of -project-name?") doubleCheckProjectName(projectName) } else if strings.HasPrefix(projectName, "rovider") { // -provider= instead of --provider - term.Warn("Did you mean to use --provider instead of -provider?") + slog.Warn("Did you mean to use --provider instead of -provider?") doubleCheckProjectName(projectName) } } @@ -113,7 +114,7 @@ func newStackManagerForLoader(ctx context.Context, loader *compose.Loader) (sess if !errors.Is(err, types.ErrComposeFileNotFound) { return nil, handleInvalidComposeFileErr(ctx, err) } - term.Debugf("Could not determine project working directory: %v", err) + slog.Debug("Could not determine project working directory", "err", err) // No project directory; look for .defang directory in current or parent directories targetDirectory, _ = findTargetDirectory(".") } else { @@ -124,7 +125,7 @@ func newStackManagerForLoader(ctx context.Context, loader *compose.Loader) (sess } projectName, _, err := loader.LoadProjectName(ctx) if err != nil { - term.Debugf("Could not determine project name: %v", err) + slog.Debug("Could not determine project name", "err", err) } sm, err := stacks.NewManager(global.Client, targetDirectory, projectName, ec) if err != nil { @@ -166,7 +167,7 @@ func handleInvalidComposeFileErr(ctx context.Context, loadErr error) error { return loadErr } - term.Error("Cannot load project:", loadErr) + slog.ErrorContext(ctx, fmt.Sprint("Cannot load project:", loadErr)) project, err := compose.NewLoader().CreateProjectForDebug() if err != nil { return fmt.Errorf("%w; original error: %w", err, loadErr) diff --git a/src/cmd/cli/command/stack.go b/src/cmd/cli/command/stack.go index 
1c94d1811..54595626c 100644 --- a/src/cmd/cli/command/stack.go +++ b/src/cmd/cli/command/stack.go @@ -3,6 +3,7 @@ package command import ( "context" "fmt" + "log/slog" "connectrpc.com/connect" "github.com/DefangLabs/defang/src/pkg/cli" @@ -92,7 +93,7 @@ func makeStackNewCmd() *cobra.Command { return fmt.Errorf("stack with name %q already exists in project %q", params.Name, projectName) } - term.Debugf("Creating stack with parameters: %+v\n", params) + slog.Debug("Creating stack with parameters", "params", params) _, err = stacks.CreateInDirectory(".", params) if err != nil { @@ -135,8 +136,8 @@ func makeStackListCmd() *cobra.Command { } if len(stacks) == 0 { - _, err = term.Infof("No Defang stacks found in the current directory.\n") - return err + slog.InfoContext(ctx, "No Defang stacks found in the current directory.") + return nil } columns := []string{"Name", "Default", "Provider", "Region", "Account", "Mode", "DeployedAt"} @@ -173,7 +174,7 @@ func makeStackDefaultCmd() *cobra.Command { return err } - term.Info(fmt.Sprintf("Stack %q is now the default stack for project %q\n", name, projectName)) + slog.InfoContext(ctx, fmt.Sprintf("Stack %q is now the default stack for project %q\n", name, projectName)) return nil }, } diff --git a/src/cmd/cli/command/stack_test.go b/src/cmd/cli/command/stack_test.go index 12e5a17b1..faf21325c 100644 --- a/src/cmd/cli/command/stack_test.go +++ b/src/cmd/cli/command/stack_test.go @@ -3,10 +3,12 @@ package command import ( "bytes" "context" + "log/slog" "os" "testing" "github.com/DefangLabs/defang/src/pkg/cli/client" + "github.com/DefangLabs/defang/src/pkg/logs" "github.com/DefangLabs/defang/src/pkg/modes" "github.com/DefangLabs/defang/src/pkg/stacks" "github.com/DefangLabs/defang/src/pkg/term" @@ -50,6 +52,7 @@ func MockTerm(t *testing.T, stdout *bytes.Buffer, stdin *bytes.Reader) { &FakeStdout{stdout}, new(bytes.Buffer), ) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) t.Cleanup(func() { term.DefaultTerm = oldTerm }) 
diff --git a/src/cmd/cli/command/whoami.go b/src/cmd/cli/command/whoami.go index 96353fb31..9364a26fd 100644 --- a/src/cmd/cli/command/whoami.go +++ b/src/cmd/cli/command/whoami.go @@ -1,6 +1,9 @@ package command import ( + "fmt" + "log/slog" + "github.com/DefangLabs/defang/src/pkg/auth" "github.com/DefangLabs/defang/src/pkg/cli" "github.com/DefangLabs/defang/src/pkg/cli/client" @@ -24,7 +27,7 @@ var whoamiCmd = &cobra.Command{ CheckAccountInfo: false, // because we do it inside cli.Whoami }) if err != nil { - term.Warnf("Provider account information not available: %v", err) + slog.WarnContext(ctx, fmt.Sprintf("Provider account information not available: %v", err)) } else { provider = session.Provider } @@ -37,7 +40,7 @@ var whoamiCmd = &cobra.Command{ userInfo, err = auth.FetchUserInfo(ctx, token) if err != nil { // Either the auth service is down, or we're using a Fabric JWT: skip workspace information - term.Warn("Workspace information unavailable:", err) + slog.WarnContext(ctx, fmt.Sprint("Workspace information unavailable:", err)) } } diff --git a/src/cmd/cli/command/workspace.go b/src/cmd/cli/command/workspace.go index b88182843..201f7b90d 100644 --- a/src/cmd/cli/command/workspace.go +++ b/src/cmd/cli/command/workspace.go @@ -2,6 +2,7 @@ package command import ( "errors" + "log/slog" "github.com/DefangLabs/defang/src/pkg" "github.com/DefangLabs/defang/src/pkg/auth" @@ -28,7 +29,7 @@ func ListWorkspaces(cmd *cobra.Command, args []string) error { rows := cli.WorkspaceRows(info, currentWorkspace) if len(rows) == 0 { - term.Info("No workspaces found for this account.") + slog.Info("No workspaces found for this account.") return nil } diff --git a/src/pkg/agent/common/common.go b/src/pkg/agent/common/common.go index 71ff060ac..730cb00ce 100644 --- a/src/pkg/agent/common/common.go +++ b/src/pkg/agent/common/common.go @@ -3,10 +3,10 @@ package common import ( "errors" "fmt" + "log/slog" "os" "github.com/DefangLabs/defang/src/pkg/cli/compose" - 
"github.com/DefangLabs/defang/src/pkg/term" ) var MCPDevelopmentClient = "" // set by NewDefangMCPServer @@ -42,23 +42,23 @@ func ConfigureAgentLoader(params LoaderParams) (*compose.Loader, error) { projectName := params.ProjectName if projectName != "" { - term.Debugf("Project name provided: %s", projectName) - term.Debug("Function invoked: compose.NewLoader") + slog.Debug("Project name provided: " + projectName) + slog.Debug("Function invoked: compose.NewLoader") return compose.NewLoader(compose.WithProjectName(projectName)), nil } composeFilePaths := params.ComposeFilePaths if len(composeFilePaths) > 0 { - term.Debugf("Compose file paths provided: %s", composeFilePaths) - term.Debug("Function invoked: compose.NewLoader") + slog.Debug("Compose file paths provided", "paths", composeFilePaths) + slog.Debug("Function invoked: compose.NewLoader") return compose.NewLoader(compose.WithPath(composeFilePaths...)), nil } //TODO: Talk about using both project name and compose file paths // if projectNameOK && composeFilePathOK { - // term.Infof("Compose file paths and project name provided: %s, %s", composeFilePaths, projectName) + // slog.Info(fmt.Sprintf("Compose file paths and project name provided: %s, %s", composeFilePaths, projectName)) // return compose.NewLoader(compose.WithProjectName(projectName), compose.WithPath(composeFilePaths...)), nil // } - term.Debug("Function invoked: compose.NewLoader") + slog.Debug("Function invoked: compose.NewLoader") return compose.NewLoader(), nil } diff --git a/src/pkg/agent/generator.go b/src/pkg/agent/generator.go index 860a95693..c94f122f4 100644 --- a/src/pkg/agent/generator.go +++ b/src/pkg/agent/generator.go @@ -4,8 +4,8 @@ import ( "context" "encoding/json" "errors" + "log/slog" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/firebase/genkit/go/ai" "github.com/firebase/genkit/go/genkit" ) @@ -73,7 +73,7 @@ func (g *Generator) HandleMessage(ctx context.Context, prompt string, maxTurns i if errors.Is(err, 
context.Canceled) { return err } - term.Debugf("error: %v", err) + slog.Debug("generate error", "err", err) continue } diff --git a/src/pkg/agent/plugins/compat_oai/generate.go b/src/pkg/agent/plugins/compat_oai/generate.go index d905d10f7..316e95889 100644 --- a/src/pkg/agent/plugins/compat_oai/generate.go +++ b/src/pkg/agent/plugins/compat_oai/generate.go @@ -19,9 +19,9 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "strings" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/firebase/genkit/go/ai" "github.com/openai/openai-go" "github.com/openai/openai-go/packages/param" @@ -258,7 +258,7 @@ func (g *ModelGenerator) generateStream(ctx context.Context, handleChunk func(co if err != nil { return nil, fmt.Errorf("failed to marshal request params for debug: %w", err) } - _, _ = term.Debugf("Chat.Completions.NewStreaming: %s", string(reqParams)) + slog.Debug("Chat.Completions.NewStreaming: " + string(reqParams)) stream := g.client.Chat.Completions.NewStreaming(ctx, *g.request) defer stream.Close() diff --git a/src/pkg/agent/toolmanager.go b/src/pkg/agent/toolmanager.go index 4242b56d5..fd8306fa4 100644 --- a/src/pkg/agent/toolmanager.go +++ b/src/pkg/agent/toolmanager.go @@ -5,9 +5,9 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "github.com/DefangLabs/defang/src/pkg/agent/common" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/firebase/genkit/go/ai" "github.com/firebase/genkit/go/genkit" ) @@ -124,7 +124,7 @@ func (t *ToolManager) EqualPrevious(toolRequests []*ai.ToolRequest) bool { for _, req := range toolRequests { inputs, err := json.Marshal(req.Input) if err != nil { - term.Debugf("error marshaling tool request input: %v", err) + slog.Debug("error marshaling tool request input", "err", err) continue } currJSON := fmt.Sprintf("%s:%s", req.Name, inputs) diff --git a/src/pkg/agent/tools/deploy.go b/src/pkg/agent/tools/deploy.go index 4a0bbf475..806a1995e 100644 --- a/src/pkg/agent/tools/deploy.go +++ b/src/pkg/agent/tools/deploy.go 
@@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "strings" "github.com/DefangLabs/defang/src/pkg/agent/common" @@ -14,7 +15,6 @@ import ( "github.com/DefangLabs/defang/src/pkg/elicitations" "github.com/DefangLabs/defang/src/pkg/modes" "github.com/DefangLabs/defang/src/pkg/stacks" - "github.com/DefangLabs/defang/src/pkg/term" ) type DeployParams struct { @@ -22,7 +22,7 @@ type DeployParams struct { } func HandleDeployTool(ctx context.Context, loader client.Loader, params DeployParams, cli CLIInterface, ec elicitations.Controller, sc StackConfig) (string, error) { - term.Debug("Function invoked: loader.LoadProject") + slog.Debug("Function invoked: loader.LoadProject") project, err := cli.LoadProject(ctx, loader) if err != nil { err = fmt.Errorf("failed to parse compose file: %w", err) @@ -30,7 +30,7 @@ func HandleDeployTool(ctx context.Context, loader client.Loader, params DeployPa return "", fmt.Errorf("local deployment failed: %v. Please provide a valid compose file path.", err) } - term.Debug("Function invoked: cli.Connect") + slog.Debug("Function invoked: cli.Connect") client, err := GetClientWithRetry(ctx, cli, sc) if err != nil { var noBrowserErr auth.ErrNoBrowser @@ -57,9 +57,9 @@ func HandleDeployTool(ctx context.Context, loader client.Loader, params DeployPa } // Deploy the services - term.Debugf("Deploying services for project %s...", project.Name) + slog.Debug("Deploying services for project", "project", project.Name) - term.Debug("Function invoked: cli.ComposeUp") + slog.Debug("Function invoked: cli.ComposeUp") // Use ComposeUp to deploy the services deployResp, project, err := cli.ComposeUp(ctx, client, provider, sc.Stack, cliTypes.ComposeUpParams{ Project: project, diff --git a/src/pkg/agent/tools/destroy.go b/src/pkg/agent/tools/destroy.go index b8944b0e9..3e848452d 100644 --- a/src/pkg/agent/tools/destroy.go +++ b/src/pkg/agent/tools/destroy.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "connectrpc.com/connect" 
"github.com/DefangLabs/defang/src/pkg/agent/common" @@ -11,7 +12,6 @@ import ( "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/elicitations" "github.com/DefangLabs/defang/src/pkg/stacks" - "github.com/DefangLabs/defang/src/pkg/term" ) type DestroyParams struct { @@ -19,7 +19,7 @@ type DestroyParams struct { } func HandleDestroyTool(ctx context.Context, loader client.Loader, params DestroyParams, cli CLIInterface, ec elicitations.Controller, sc StackConfig) (string, error) { - term.Debug("Function invoked: cli.Connect") + slog.Debug("Function invoked: cli.Connect") client, err := GetClientWithRetry(ctx, cli, sc) if err != nil { var noBrowserErr auth.ErrNoBrowser @@ -39,7 +39,7 @@ func HandleDestroyTool(ctx context.Context, loader client.Loader, params Destroy if err != nil { return "", fmt.Errorf("failed to setup provider: %w", err) } - term.Debug("Function invoked: cli.LoadProjectNameWithFallback") + slog.Debug("Function invoked: cli.LoadProjectNameWithFallback") projectName, err := cli.LoadProjectNameWithFallback(ctx, loader, provider) if err != nil { return "", fmt.Errorf("failed to load project name: %w", err) @@ -50,7 +50,7 @@ func HandleDestroyTool(ctx context.Context, loader client.Loader, params Destroy return "", fmt.Errorf("failed to use provider: %w", err) } - term.Debug("Function invoked: cli.ComposeDown") + slog.Debug("Function invoked: cli.ComposeDown") deployment, err := cli.ComposeDown(ctx, projectName, client, provider) if err != nil { if connect.CodeOf(err) == connect.CodeNotFound { diff --git a/src/pkg/agent/tools/estimate.go b/src/pkg/agent/tools/estimate.go index 9c041e7cb..464354776 100644 --- a/src/pkg/agent/tools/estimate.go +++ b/src/pkg/agent/tools/estimate.go @@ -4,12 +4,12 @@ import ( "context" "errors" "fmt" + "log/slog" "github.com/DefangLabs/defang/src/pkg/agent/common" "github.com/DefangLabs/defang/src/pkg/auth" "github.com/DefangLabs/defang/src/pkg/cli/client" 
"github.com/DefangLabs/defang/src/pkg/modes" - "github.com/DefangLabs/defang/src/pkg/term" ) type EstimateParams struct { @@ -20,14 +20,14 @@ type EstimateParams struct { } func HandleEstimateTool(ctx context.Context, loader client.Loader, params EstimateParams, cli CLIInterface, sc StackConfig) (string, error) { - term.Debug("Function invoked: loader.LoadProject") + slog.Debug("Function invoked: loader.LoadProject") project, err := cli.LoadProject(ctx, loader) if err != nil { err = fmt.Errorf("failed to parse compose file: %w", err) return "", fmt.Errorf("failed to parse compose file: %w", err) } - term.Debug("Function invoked: cli.Connect") + slog.Debug("Function invoked: cli.Connect") fabric, err := GetClientWithRetry(ctx, cli, sc) if err != nil { var noBrowserErr auth.ErrNoBrowser @@ -51,12 +51,12 @@ func HandleEstimateTool(ctx context.Context, loader client.Loader, params Estima return "", err } - term.Debug("Function invoked: cli.RunEstimate") + slog.Debug("Function invoked: cli.RunEstimate") estimate, err := cli.RunEstimate(ctx, project, fabric, defangProvider, providerID, params.Region, deploymentMode) if err != nil { return "", fmt.Errorf("failed to run estimate: %w", err) } - term.Debugf("Estimate: %+v", estimate) + slog.Debug("Estimate", "estimate", estimate) estimateText := cli.PrintEstimate(deploymentMode, estimate) diff --git a/src/pkg/agent/tools/listConfig.go b/src/pkg/agent/tools/listConfig.go index 21a426f70..e1db27553 100644 --- a/src/pkg/agent/tools/listConfig.go +++ b/src/pkg/agent/tools/listConfig.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "strings" "github.com/DefangLabs/defang/src/pkg/agent/common" @@ -11,7 +12,6 @@ import ( "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/elicitations" "github.com/DefangLabs/defang/src/pkg/stacks" - "github.com/DefangLabs/defang/src/pkg/term" ) type ListConfigParams struct { @@ -20,7 +20,7 @@ type ListConfigParams struct { // HandleListConfigTool 
handles the list config tool logic func HandleListConfigTool(ctx context.Context, loader client.Loader, params ListConfigParams, cli CLIInterface, ec elicitations.Controller, sc StackConfig) (string, error) { - term.Debug("Function invoked: cli.Connect") + slog.Debug("Function invoked: cli.Connect") client, err := GetClientWithRetry(ctx, cli, sc) if err != nil { var noBrowserErr auth.ErrNoBrowser @@ -41,14 +41,14 @@ func HandleListConfigTool(ctx context.Context, loader client.Loader, params List return "", fmt.Errorf("failed to setup provider: %w", err) } - term.Debug("Function invoked: cli.LoadProjectNameWithFallback") + slog.Debug("Function invoked: cli.LoadProjectNameWithFallback") projectName, err := cli.LoadProjectNameWithFallback(ctx, loader, provider) if err != nil { return "", fmt.Errorf("failed to load project name: %w", err) } - term.Debug("Project name loaded:", projectName) + slog.Debug(fmt.Sprint("Project name loaded:", projectName)) - term.Debug("Function invoked: cli.ConfigList") + slog.Debug("Function invoked: cli.ConfigList") config, err := cli.ListConfig(ctx, provider, projectName) if err != nil { return "", fmt.Errorf("failed to list config variables: %w", err) diff --git a/src/pkg/agent/tools/logs.go b/src/pkg/agent/tools/logs.go index 75724970d..c904329e9 100644 --- a/src/pkg/agent/tools/logs.go +++ b/src/pkg/agent/tools/logs.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "time" "github.com/DefangLabs/defang/src/pkg/agent/common" @@ -13,7 +14,6 @@ import ( "github.com/DefangLabs/defang/src/pkg/elicitations" "github.com/DefangLabs/defang/src/pkg/logs" "github.com/DefangLabs/defang/src/pkg/stacks" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/timeutils" ) @@ -41,7 +41,7 @@ func HandleLogsTool(ctx context.Context, loader client.Loader, params LogsParams } } - term.Debug("Function invoked: cli.Connect") + slog.Debug("Function invoked: cli.Connect") client, err := GetClientWithRetry(ctx, cli, 
sc) if err != nil { var noBrowserErr auth.ErrNoBrowser @@ -62,12 +62,12 @@ func HandleLogsTool(ctx context.Context, loader client.Loader, params LogsParams return "", fmt.Errorf("failed to setup provider: %w", err) } - term.Debug("Function invoked: cli.LoadProjectNameWithFallback") + slog.Debug("Function invoked: cli.LoadProjectNameWithFallback") projectName, err := cli.LoadProjectNameWithFallback(ctx, loader, provider) if err != nil { return "", fmt.Errorf("failed to load project name: %w", err) } - term.Debug("Project name loaded:", projectName) + slog.Debug(fmt.Sprint("Project name loaded:", projectName)) err = cli.CanIUseProvider(ctx, client, provider, projectName, 0) if err != nil { @@ -86,7 +86,7 @@ func HandleLogsTool(ctx context.Context, loader client.Loader, params LogsParams }) if err != nil { - term.Error("Failed to fetch logs", "error", err) + slog.ErrorContext(ctx, "Failed to fetch logs", "error", err) return "", fmt.Errorf("failed to fetch logs: %w", err) } diff --git a/src/pkg/agent/tools/provider.go b/src/pkg/agent/tools/provider.go index 4f1c670e5..d602d4719 100644 --- a/src/pkg/agent/tools/provider.go +++ b/src/pkg/agent/tools/provider.go @@ -3,11 +3,11 @@ package tools import ( "context" "fmt" + "log/slog" "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/elicitations" "github.com/DefangLabs/defang/src/pkg/stacks" - "github.com/DefangLabs/defang/src/pkg/term" ) const CreateNewStack = "Create new stack" @@ -48,7 +48,7 @@ func (pp *providerPreparer) SetupProvider(ctx context.Context, stack *stacks.Par } } - term.Debug("Function invoked: cli.NewProvider") + slog.Debug("Function invoked: cli.NewProvider") provider := pp.pc.NewProvider(ctx, stack.Provider, pp.fc, stack.Name) providerID := stack.Provider return &providerID, provider, nil diff --git a/src/pkg/agent/tools/removeConfig.go b/src/pkg/agent/tools/removeConfig.go index e16751a65..956bc817a 100644 --- a/src/pkg/agent/tools/removeConfig.go +++
b/src/pkg/agent/tools/removeConfig.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "connectrpc.com/connect" "github.com/DefangLabs/defang/src/pkg/agent/common" @@ -11,7 +12,6 @@ import ( "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/elicitations" "github.com/DefangLabs/defang/src/pkg/stacks" - "github.com/DefangLabs/defang/src/pkg/term" ) type RemoveConfigParams struct { @@ -21,7 +21,7 @@ type RemoveConfigParams struct { // HandleRemoveConfigTool handles the remove config tool logic func HandleRemoveConfigTool(ctx context.Context, loader client.Loader, params RemoveConfigParams, cli CLIInterface, ec elicitations.Controller, sc StackConfig) (string, error) { - term.Debug("Function invoked: cli.Connect") + slog.Debug("Function invoked: cli.Connect") client, err := GetClientWithRetry(ctx, cli, sc) if err != nil { var noBrowserErr auth.ErrNoBrowser @@ -41,7 +41,7 @@ func HandleRemoveConfigTool(ctx context.Context, loader client.Loader, params Re if err != nil { return "", fmt.Errorf("failed to setup provider: %w", err) } - term.Debug("Function invoked: cli.LoadProjectNameWithFallback") + slog.Debug("Function invoked: cli.LoadProjectNameWithFallback") projectName, err := cli.LoadProjectNameWithFallback(ctx, loader, provider) if err != nil { return "", fmt.Errorf("failed to load project name: %w", err) diff --git a/src/pkg/agent/tools/services.go b/src/pkg/agent/tools/services.go index ac9687d61..89bb956d9 100644 --- a/src/pkg/agent/tools/services.go +++ b/src/pkg/agent/tools/services.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "strings" "connectrpc.com/connect" @@ -14,7 +15,6 @@ import ( "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/elicitations" "github.com/DefangLabs/defang/src/pkg/stacks" - "github.com/DefangLabs/defang/src/pkg/term" ) type ServicesParams struct { @@ -22,7 +22,7 @@ type ServicesParams struct { } func HandleServicesTool(ctx 
context.Context, loader client.Loader, params ServicesParams, cli CLIInterface, ec elicitations.Controller, sc StackConfig) (string, error) { - term.Debug("Function invoked: cli.Connect") + slog.Debug("Function invoked: cli.Connect") client, err := GetClientWithRetry(ctx, cli, sc) if err != nil { var noBrowserErr auth.ErrNoBrowser @@ -42,15 +42,15 @@ func HandleServicesTool(ctx context.Context, loader client.Loader, params Servic if err != nil { return "", fmt.Errorf("failed to setup provider: %w", err) } - term.Debug("Function invoked: cli.LoadProjectNameWithFallback") + slog.Debug("Function invoked: cli.LoadProjectNameWithFallback") projectName, err := cli.LoadProjectNameWithFallback(ctx, loader, provider) - term.Debugf("Project name loaded: %s", projectName) if err != nil { if strings.Contains(err.Error(), "no projects found") { return "no projects found on Playground", nil } return "", fmt.Errorf("failed to load project name: %w", err) } + slog.Debug("Project name loaded: " + projectName) serviceResponse, err := cli.GetServices(ctx, projectName, provider) if err != nil { @@ -68,7 +68,7 @@ func HandleServicesTool(ctx context.Context, loader client.Loader, params Servic // Convert to JSON jsonData, jsonErr := json.Marshal(serviceResponse) if jsonErr == nil { - term.Debugf("Successfully loaded services with count: %d", len(serviceResponse)) + slog.Debug("Successfully loaded services", "count", len(serviceResponse)) return string(jsonData) + "\nIf you would like to see more details about your deployed projects, please visit the Defang portal at https://portal.defang.io/projects", nil } diff --git a/src/pkg/agent/tools/setConfig.go b/src/pkg/agent/tools/setConfig.go index 2a26fed0a..250625ed6 100644 --- a/src/pkg/agent/tools/setConfig.go +++ b/src/pkg/agent/tools/setConfig.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "github.com/DefangLabs/defang/src/pkg" "github.com/DefangLabs/defang/src/pkg/agent/common" @@ -12,7 +13,6 @@ import ( 
"github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/elicitations" "github.com/DefangLabs/defang/src/pkg/stacks" - "github.com/DefangLabs/defang/src/pkg/term" ) type SetConfigParams struct { @@ -23,7 +23,7 @@ type SetConfigParams struct { } func HandleSetConfig(ctx context.Context, loader client.Loader, params SetConfigParams, cliInterface CLIInterface, ec elicitations.Controller, sc StackConfig) (string, error) { - term.Debug("Function invoked: cli.Connect") + slog.Debug("Function invoked: cli.Connect") client, err := GetClientWithRetry(ctx, cliInterface, sc) if err != nil { var noBrowserErr auth.ErrNoBrowser @@ -45,7 +45,7 @@ func HandleSetConfig(ctx context.Context, loader client.Loader, params SetConfig } if params.ProjectName == "" { - term.Debug("Function invoked: cli.LoadProjectNameWithFallback") + slog.Debug("Function invoked: cli.LoadProjectNameWithFallback") projectName, err := cliInterface.LoadProjectNameWithFallback(ctx, loader, provider) if err != nil { return "", fmt.Errorf("failed to load project name: %w", err) @@ -63,10 +63,10 @@ func HandleSetConfig(ctx context.Context, loader client.Loader, params SetConfig return "", errors.New("Both 'random' and 'value' parameters provided; please provide only one") } value = cli.CreateRandomConfigValue() - term.Debug("Generated random value for config") + slog.Debug("Generated random value for config") } - term.Debug("Function invoked: cli.ConfigSet") + slog.Debug("Function invoked: cli.ConfigSet") if err := cliInterface.ConfigSet(ctx, params.ProjectName, provider, params.Name, value); err != nil { return "", fmt.Errorf("failed to set config: %w", err) } diff --git a/src/pkg/auth/auth.go b/src/pkg/auth/auth.go index b51d6bccc..bd6424189 100644 --- a/src/pkg/auth/auth.go +++ b/src/pkg/auth/auth.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "net/url" "path" "strings" @@ -57,7 +58,7 @@ func StartAuthCodeFlow(ctx context.Context, mcpFlow LoginFlow, saveToken 
func(st // Create a shortened authorize URL by only including the variable parts (state and code_challenge) authorizeUrl := GetAuthorizeUrl("cli", ar.state, ar.challenge) - term.Println("Please visit the following URL to log in: (Right click the URL or press ENTER to open browser)") + fmt.Println("Please visit the following URL to log in: (Right click the URL or press ENTER to open browser)") n, _ := term.Printf(" %s", authorizeUrl) defer term.Print("\r", strings.Repeat(" ", n), "\r") // TODO: use termenv to clear line @@ -72,13 +73,13 @@ func StartAuthCodeFlow(ctx context.Context, mcpFlow LoginFlow, saveToken func(st ctx := context.Background() code, err := pollForAuthCode(ctx, ar.state) if err != nil { - term.Errorf("failed to poll for auth code: %v", err) + slog.ErrorContext(ctx, fmt.Sprintf("failed to poll for auth code: %v", err)) return } token, err := ExchangeCodeForToken(ctx, AuthCodeFlow{code: code, redirectUri: redirectUri, verifier: ar.verifier}) if err != nil { - term.Errorf("failed to exchange code for token: %v", err) + slog.ErrorContext(ctx, fmt.Sprintf("failed to exchange code for token: %v", err)) return } @@ -111,12 +112,12 @@ func Poll(ctx context.Context, key string) ([]byte, error) { result, err := OpenAuthClient.Poll(ctx, key) if err != nil { if errors.Is(err, ErrPollTimeout) { - term.Debug("poll timed out, retrying...") + slog.Debug("poll timed out, retrying...") continue } var unexpectedError ErrUnexpectedStatus if errors.As(err, &unexpectedError) && unexpectedError.StatusCode >= 500 { - term.Debugf("received server error: %s, retrying in %v...", unexpectedError.Status, retryDelay) + slog.Debug("received server error, retrying", "status", unexpectedError.Status, "retryDelay", retryDelay) select { case <-ctx.Done(): return nil, ctx.Err() @@ -161,7 +162,7 @@ func ExchangeCodeForToken(ctx context.Context, code AuthCodeFlow, ss ...scope.Sc scopes = append(scopes, s.String()) } - term.Debugf("Generating access token with scopes %v", scopes) + 
slog.Debug("Generating access token", "scopes", scopes) token, err := OpenAuthClient.Exchange(code.code, code.redirectUri, code.verifier) // TODO: scope if err != nil { diff --git a/src/pkg/cli/cd.go b/src/pkg/cli/cd.go index 33f8601de..e260fef0b 100644 --- a/src/pkg/cli/cd.go +++ b/src/pkg/cli/cd.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "os" "slices" "strings" @@ -21,9 +22,9 @@ import ( func CdCommand(ctx context.Context, projectName string, provider client.Provider, fabric client.FabricClient, command client.CdCommand) (types.ETag, error) { if projectName == "" { // projectName is empty for "list --remote" - term.Infof("Running CD command %q", command) + slog.InfoContext(ctx, fmt.Sprintf("Running CD command %q", command)) } else { - term.Infof("Running CD command %q in project %q", command, projectName) + slog.InfoContext(ctx, fmt.Sprintf("Running CD command %q in project %q", command, projectName)) } if dryrun.DoDryRun { return "", dryrun.ErrDryRun @@ -48,7 +49,7 @@ func CdCommand(ctx context.Context, projectName string, provider client.Provider case client.CdCommandDown, client.CdCommandDestroy: err := deleteSubdomain(ctx, projectName, provider, fabric) if err != nil { - term.Warn("Unable to update deployment history; deployment will proceed anyway.") + slog.WarnContext(ctx, "Unable to update deployment history; deployment will proceed anyway.") break } // Update deployment table to mark deployment as destroyed only after successful deletion of the subdomain @@ -65,8 +66,8 @@ func CdCommand(ctx context.Context, projectName string, provider client.Provider StatesUrl: statesUrl, }) if err != nil { - term.Debug("Failed to record deployment:", err) - term.Warn("Unable to update deployment history; deployment will proceed anyway.") + slog.Debug("Failed to record deployment", "err", err) + slog.WarnContext(ctx, "Unable to update deployment history; deployment will proceed anyway.") } } return cd.ETag, nil @@ -80,9 +81,9 @@ func 
deleteSubdomain(ctx context.Context, projectName string, provider client.Pr }) if err != nil { // This can fail when the project was deployed from a different workspace than the current one - term.Debug("DeleteSubdomainZone failed:", err) + slog.Debug(fmt.Sprint("DeleteSubdomainZone failed:", err)) if connect.CodeOf(err) == connect.CodeNotFound { - term.Warn("Subdomain not found; did you mean to destroy a different project or stack?") + slog.WarnContext(ctx, "Subdomain not found; did you mean to destroy a different project or stack?") } return err } @@ -121,7 +122,7 @@ func TailAndWaitForCD(ctx context.Context, provider client.Provider, projectName // blocking call to tail var tailErr error if err := streamLogs(ctx, provider, projectName, tailOptions, logEntryPrintHandler); err != nil { - term.Debug("Tail stopped with", err, errors.Unwrap(err)) + slog.Debug(fmt.Sprint("Tail stopped with", err, errors.Unwrap(err))) if !errors.Is(err, context.Canceled) { tailErr = err } @@ -136,7 +137,7 @@ func SplitProjectStack(name string) (projectName string, stackName string) { } func CdListFromStorage(ctx context.Context, provider client.Provider, allRegions bool) error { - term.Debug("Running CD list") + slog.Debug("Running CD list") if dryrun.DoDryRun { return dryrun.ErrDryRun } @@ -156,7 +157,7 @@ func CdListFromStorage(ctx context.Context, provider client.Provider, allRegions if allRegions { accountInfo.Region = "" } - term.Printf("No projects found in %v\n", accountInfo) + slog.InfoContext(ctx, fmt.Sprintf("No projects found in %v", accountInfo)) } return term.Table(stacks, "Project", "Stack", "Workspace", "CdRegion") diff --git a/src/pkg/cli/cert.go b/src/pkg/cli/cert.go index 954958663..93f5c5787 100644 --- a/src/pkg/cli/cert.go +++ b/src/pkg/cli/cert.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "io" + "log/slog" "net" "net/http" "strings" @@ -69,7 +70,7 @@ var ( ExpectContinueTimeout: 1 * time.Second, }, CheckRedirect: func(req *http.Request, via []*http.Request) error { - 
term.Debugf("Redirecting from %v to %v", via[len(via)-1].URL, req.URL) + slog.Debug("Redirecting", "from", via[len(via)-1].URL, "to", req.URL) return nil }, } @@ -77,7 +78,7 @@ var ( ) func GenerateLetsEncryptCert(ctx context.Context, project *compose.Project, client client.FabricClient, provider client.Provider) error { - term.Debugf("Generating TLS cert for project %q", project.Name) + slog.Debug("Generating TLS cert for project", "project", project.Name) services, err := provider.GetServices(ctx, &defangv1.GetServicesRequest{Project: project.Name}) if err != nil { @@ -95,7 +96,7 @@ func GenerateLetsEncryptCert(ctx context.Context, project *compose.Project, clie } if service, ok := project.Services[serviceInfo.Service.Name]; ok { if service.DomainName != serviceInfo.Domainname { - term.Warnf("service %q: domainname %q in compose file does not match deployed value %q", service.Name, service.DomainName, serviceInfo.Domainname) + slog.WarnContext(ctx, fmt.Sprintf("service %q: domainname %q in compose file does not match deployed value %q", service.Name, service.DomainName, serviceInfo.Domainname)) } cnt++ targets := getDomainTargets(serviceInfo, service) @@ -103,14 +104,14 @@ func GenerateLetsEncryptCert(ctx context.Context, project *compose.Project, clie if defaultNetwork := service.Networks["default"]; defaultNetwork != nil { domains = append(domains, defaultNetwork.Aliases...) 
} - term.Debugf("Found service %v with domains %v and targets %v", service.Name, domains, targets) + slog.Debug("Found service with domains and targets", "service", service.Name, "domains", domains, "targets", targets) for _, domain := range domains { generateCert(ctx, domain, targets, client) } } } if cnt == 0 { - term.Infof("No `domainname` found in compose file; no HTTPS cert generation needed") + slog.InfoContext(ctx, "No `domainname` found in compose file; no HTTPS cert generation needed") } return nil @@ -132,35 +133,35 @@ func getDomainTargets(serviceInfo *defangv1.ServiceInfo, service compose.Service } func generateCert(ctx context.Context, domain string, targets []string, client client.FabricClient) { - term.Infof("Checking DNS setup for %v", domain) + slog.InfoContext(ctx, fmt.Sprintf("Checking DNS setup for %v", domain)) if err := waitForCNAME(ctx, domain, targets, client); err != nil { - term.Errorf("Error waiting for CNAME: %v", err) + slog.ErrorContext(ctx, fmt.Sprintf("Error waiting for CNAME: %v", err)) return } - term.Infof("%v DNS is properly configured!", domain) + slog.InfoContext(ctx, fmt.Sprintf("%v DNS is properly configured!", domain)) if err := cert.CheckTLSCert(ctx, domain); err == nil { - term.Infof("TLS cert for %v is already ready", domain) + slog.InfoContext(ctx, fmt.Sprintf("TLS cert for %v is already ready", domain)) return } if err := pkg.SleepWithContext(ctx, 5*time.Second); err != nil { // slight delay to ensure DNS to propagate - term.Errorf("Error waiting for DNS propagation: %v", err) + slog.ErrorContext(ctx, fmt.Sprintf("Error waiting for DNS propagation: %v", err)) return } - term.Infof("Triggering cert generation for %v", domain) + slog.InfoContext(ctx, fmt.Sprintf("Triggering cert generation for %v", domain)) if err := triggerCertGeneration(ctx, domain); err != nil { - term.Errorf("Error triggering cert generation, please try again") + slog.ErrorContext(ctx, "Error triggering cert generation, please try again", "domain", 
domain, "err", err) return } - term.Infof("Waiting for TLS cert to be online for %v, this could take a few minutes", domain) + slog.InfoContext(ctx, fmt.Sprintf("Waiting for TLS cert to be online for %v, this could take a few minutes", domain)) if err := waitForTLS(ctx, domain); err != nil { - term.Errorf("Error waiting for TLS to be online: %v", err) + slog.ErrorContext(ctx, fmt.Sprintf("Error waiting for TLS to be online: %v", err)) // FIXME: Add more info on how to debug, possibly provided by the server side to avoid client type detection here return } - term.Infof("TLS cert for %v is ready\n", domain) + slog.InfoContext(ctx, fmt.Sprintf("TLS cert for %v is ready\n", domain)) } func triggerCertGeneration(ctx context.Context, domain string) error { @@ -176,7 +177,7 @@ func triggerCertGeneration(ctx context.Context, domain string) error { // Our own retry logic uses the root resolver to prevent cached DNS and retry on all non-200 errors if err := getWithRetries(ctx, fmt.Sprintf("http://%v", domain), 5); err != nil { // Retry incase of DNS error // Ignore possible tls error as cert attachment may take time - term.Debugf("Error triggering cert generation: %v", err) + slog.Debug("Error triggering cert generation", "err", err) return err } return nil @@ -205,7 +206,7 @@ func waitForTLS(ctx context.Context, domain string) error { if err := cert.CheckTLSCert(timeout, domain); err == nil { return nil } else { - term.Debugf("Error checking TLS cert for %v: %v", domain, err) + slog.Debug("Error checking TLS cert", "domain", domain, "err", err) } } } @@ -234,24 +235,24 @@ func waitForCNAME(ctx context.Context, domain string, targets []string, client c verifyDNS := func() error { if !serverSideVerified && serverVerifyRpcFailure < 3 { if err := client.VerifyDNSSetup(ctx, &defangv1.VerifyDNSSetupRequest{Domain: domain, Targets: targets}); err == nil { - term.Debugf("Server side DNS verification for %v successful", domain) + slog.Debug("Server side DNS verification successful", 
"domain", domain) serverSideVerified = true } else { if cerr := new(connect.Error); errors.As(err, &cerr) && cerr.Code() == connect.CodeFailedPrecondition { - term.Debugf("Server side DNS verification negative result: %v", cerr.Message()) + slog.Debug("Server side DNS verification negative result", "message", cerr.Message()) } else { - term.Debugf("Server side DNS verification request for %v failed: %v", domain, err) + slog.Debug("Server side DNS verification request failed", "domain", domain, "error", err) serverVerifyRpcFailure++ } } if serverVerifyRpcFailure >= 3 { - term.Warnf("Server side DNS verification for %v failed multiple times, skipping server side DNS verification.", domain) + slog.WarnContext(ctx, fmt.Sprintf("Server side DNS verification for %v failed multiple times, skipping server side DNS verification.", domain)) } } if serverSideVerified || serverVerifyRpcFailure >= 3 { locallyVerified := dns.CheckDomainDNSReady(ctx, domain, targets) if serverSideVerified && !locallyVerified { - term.Warnf("DNS settings for %v are verified, but changes may take a few minutes to propagate due to caching.", domain) + slog.WarnContext(ctx, fmt.Sprintf("DNS settings for %v are verified, but changes may take a few minutes to propagate due to caching.", domain)) return nil } if locallyVerified { @@ -264,9 +265,9 @@ func waitForCNAME(ctx context.Context, domain string, targets []string, client c if err := verifyDNS(); err == nil { return nil } - term.Infof("Configure a CNAME or ALIAS record for the domain name: %v", domain) + slog.InfoContext(ctx, fmt.Sprintf("Configure a CNAME or ALIAS record for the domain name: %v", domain)) term.Printf(" %v -> %v\n", domain, strings.Join(targets, " or ")) - term.Infof("Awaiting DNS record setup and propagation... This may take a while.") + slog.InfoContext(ctx, "Awaiting DNS record setup and propagation... 
This may take a while.") for { select { @@ -295,18 +296,18 @@ func getWithRetries(ctx context.Context, url string, tries int) error { return nil } if resp != nil && resp.Request != nil && resp.Request.URL.Scheme == "https" { - term.Debugf("cert gen request success, received redirect to %v", resp.Request.URL) + slog.Debug("cert gen request success, received redirect", "url", resp.Request.URL) return nil // redirect to https indicate a successful cert generation } if err == nil { err = fmt.Errorf("HTTP: %v", resp.StatusCode) } } else if cve := new(tls.CertificateVerificationError); errors.As(err, &cve) { - term.Debugf("cert gen request success, received tls error: %v", cve) + slog.Debug("cert gen request success, received tls error", "err", cve) return nil // tls error indicate a successful cert gen trigger, as it has to be redirected to https } - term.Debugf("Error fetching %v: %v, tries left %v", url, err, tries-i-1) + slog.Debug("Error fetching url", "url", url, "err", err, "triesLeft", tries-i-1) errs = append(errs, err) delay := httpRetryDelayBase << i // Simple exponential backoff diff --git a/src/pkg/cli/client/byoc/aws/alb_logs.go b/src/pkg/cli/client/byoc/aws/alb_logs.go index 3228cbdcf..8991bf925 100644 --- a/src/pkg/cli/client/byoc/aws/alb_logs.go +++ b/src/pkg/cli/client/byoc/aws/alb_logs.go @@ -8,12 +8,12 @@ import ( "fmt" "io" "iter" + "log/slog" "slices" "strings" "time" "github.com/DefangLabs/defang/src/pkg/clouds/aws/cw" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/aws/aws-sdk-go-v2/service/s3" s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" ) @@ -34,7 +34,7 @@ func (b *ByocAws) fetchAndStreamAlbLogs(ctx context.Context, projectName string, if b.Prefix != "" { bucketPrefix = b.Prefix + "-" + bucketPrefix } - term.Debug("Query ALB logs", bucketPrefix) + slog.Debug(fmt.Sprint("Query ALB logs", bucketPrefix)) if len(bucketPrefix) > 31 { // HACK: AWS CD truncates the ALB name to 31 characters (because of the long Terraform suffix) 
bucketPrefix = bucketPrefix[:31] diff --git a/src/pkg/cli/client/byoc/aws/byoc.go b/src/pkg/cli/client/byoc/aws/byoc.go index babf9950b..aee131df8 100644 --- a/src/pkg/cli/client/byoc/aws/byoc.go +++ b/src/pkg/cli/client/byoc/aws/byoc.go @@ -9,6 +9,7 @@ import ( "fmt" "io" "iter" + "log/slog" "os" "path/filepath" "strconv" @@ -91,7 +92,7 @@ func AnnotateAwsError(err error) error { if err == nil { return nil } - term.Debug("AWS error:", err) + slog.Debug(fmt.Sprint("AWS error:", err)) if strings.Contains(err.Error(), "missing AWS region:") { return ErrMissingAwsRegion{err} } @@ -120,11 +121,11 @@ func NewByocProvider(ctx context.Context, tenantName types.TenantLabel, stack st AWSSecretAccessKey := os.Getenv("AWS_SECRET_ACCESS_KEY") switch { case AWSAccessKeyID != "" && AWSSecretAccessKey != "": - term.Warnf("Both AWS_ACCESS_KEY_ID/AWS_SECRET_ACCESS_KEY and AWS_PROFILE (%q) are set; access keys take precedence and AWS_PROFILE will be ignored", awsProfileName) + slog.WarnContext(ctx, fmt.Sprintf("Both AWS_ACCESS_KEY_ID/AWS_SECRET_ACCESS_KEY and AWS_PROFILE (%q) are set; access keys take precedence and AWS_PROFILE will be ignored", awsProfileName)) case AWSAccessKeyID != "" && AWSSecretAccessKey == "": - term.Warnf("Partial credentials found in env, missing: AWS_SECRET_ACCESS_KEY; using AWS_PROFILE (%q) instead", awsProfileName) + slog.WarnContext(ctx, fmt.Sprintf("Partial credentials found in env, missing: AWS_SECRET_ACCESS_KEY; using AWS_PROFILE (%q) instead", awsProfileName)) case AWSAccessKeyID == "" && AWSSecretAccessKey != "": - term.Warnf("Partial credentials found in env, missing: AWS_ACCESS_KEY_ID; using AWS_PROFILE (%q) instead", awsProfileName) + slog.WarnContext(ctx, fmt.Sprintf("Partial credentials found in env, missing: AWS_ACCESS_KEY_ID; using AWS_PROFILE (%q) instead", awsProfileName)) } } @@ -154,7 +155,7 @@ func (b *ByocAws) SetUpCD(ctx context.Context, force bool) error { return nil } - term.Debugf("Using CD image: %q", b.CDImage) + slog.Debug("Using 
CD image", "image", b.CDImage) _, err := b.driver.SetUp(ctx, force) if err != nil { @@ -266,13 +267,13 @@ func (b *ByocAws) deploy(ctx context.Context, req *client.DeployRequest, cmd str } if b.needDockerHubCreds { - term.Debugf("Docker Hub credentials are needed for image pulls") + slog.Debug("Docker Hub credentials are needed for image pulls") dockerHubUser, dockerHubPass, err := dockerhub.GetDockerHubCredentials(ctx) if err != nil { - term.Debugf("Could not retrieve Docker Hub credentials: %v", err) - term.Warnf("Docker Hub credentials are required to avoid pull throttling. Please run `docker login` or set the DOCKERHUB_USERNAME and DOCKERHUB_TOKEN environment variables. Without valid credentials, image pulls may be rate-limited or fail.") + slog.Debug("Could not retrieve Docker Hub credentials", "err", err) + slog.WarnContext(ctx, "Docker Hub credentials are required to avoid pull throttling. Please run `docker login` or set the DOCKERHUB_USERNAME and DOCKERHUB_TOKEN environment variables. 
Without valid credentials, image pulls may be rate-limited or fail.") } else { - term.Debugf("Using Docker Hub credentials with user %v", dockerHubUser) + slog.Debug("Using Docker Hub credentials", "user", dockerHubUser) cdCmd.dockerHubUsername = dockerHubUser cdCmd.dockerHubAccessToken = dockerHubPass } @@ -288,7 +289,7 @@ func (b *ByocAws) deploy(ctx context.Context, req *client.DeployRequest, cmd str for _, si := range serviceInfos { if si.UseAcmeCert { - term.Infof("To activate TLS certificate for %v, run 'defang cert gen'", si.Domainname) + slog.InfoContext(ctx, fmt.Sprintf("To activate TLS certificate for %v, run 'defang cert gen'", si.Domainname)) } } @@ -364,7 +365,7 @@ func (b *ByocAws) checkRequiresDockerHubToken(ctx context.Context, project *comp found, err := b.driver.CheckImageExistOnPublicECR(ctx, ecrRepo, tag) if err != nil { - term.Debugf("Error checking image %q on Public ECR: %v, assuming credentials needed", image, err) + slog.Debug("Error checking image on Public ECR, assuming credentials needed", "image", image, "err", err) found = false } if !found { @@ -378,7 +379,7 @@ func (b *ByocAws) checkRequiresDockerHubToken(ctx context.Context, project *comp } if len(missingDockerhubImages) > 0 { b.needDockerHubCreds = true - term.Debugf("Docker Hub images not found on Public ECR: %v", missingDockerhubImages) + slog.Debug("Docker Hub images not found on Public ECR", "images", missingDockerhubImages) track.Evt("NeedsDockerHubCreds", track.P("images", strings.Join(missingDockerhubImages, ","))) } return nil @@ -411,7 +412,7 @@ func (b *ByocAws) findZone(ctx context.Context, domain, roleARN string) (string, return "", err } if len(zones) > 1 { - term.Warnf("Multiple hosted zones found for domain %q, using the first one: %v", domain, zones[0].Id) + slog.WarnContext(ctx, fmt.Sprintf("Multiple hosted zones found for domain %q, using the first one: %v", domain, zones[0].Id)) } return *zones[0].Id, nil } @@ -551,10 +552,10 @@ func (b *ByocAws) runCdCommand(ctx 
context.Context, cmd cdCommand) (awscodebuild if cmd.dockerHubUsername != "" && cmd.dockerHubAccessToken != "" { arn, err := b.putDockerHubSecret(ctx, cmd.project, cmd.dockerHubUsername, cmd.dockerHubAccessToken) if err != nil { - term.Warnf("Could not store Docker Hub credentials in Secrets Manager, images from dockerhub may be throttled during build: %v", err) + slog.WarnContext(ctx, fmt.Sprintf("Could not store Docker Hub credentials in Secrets Manager, images from dockerhub may be throttled during build: %v", err)) } else { env["CI_REGISTRY_CREDENTIALS_ARN"] = arn - term.Debugf("Stored Docker Hub credentials in Secrets Manager: %s", arn) + slog.Debug("Stored Docker Hub credentials in Secrets Manager: " + arn) } } @@ -595,7 +596,7 @@ func (b *ByocAws) GetProjectUpdate(ctx context.Context, projectName string) (*de // FillOutputs might fail if the stack is not created yet; return ErrNotExist (no bucket = no services yet) var cfnErr *cfn.ErrStackNotFoundException if errors.As(err, &cfnErr) { - term.Debugf("FillOutputs: %v", err) + slog.Debug("FillOutputs", "err", err) return nil, client.ErrNotExist // no bucket = no services yet } return nil, AnnotateAwsError(err) @@ -611,14 +612,14 @@ func (b *ByocAws) GetProjectUpdate(ctx context.Context, projectName string) (*de s3Client := aws.NewS3FromConfig(cfg) path := b.GetProjectUpdatePath(projectName) - term.Debug("Getting services from bucket:", bucketName, path) + slog.Debug(fmt.Sprint("Getting services from bucket:", bucketName, path)) getObjectOutput, err := s3Client.GetObject(ctx, &s3.GetObjectInput{ Bucket: &bucketName, Key: &path, }) if err != nil { if aws.IsS3NoSuchKeyError(err) { - term.Debug("s3.GetObject:", err) + slog.Debug(fmt.Sprint("s3.GetObject:", err)) return nil, client.ErrNotExist // no services yet } return nil, AnnotateAwsError(err) @@ -658,14 +659,14 @@ func (b *ByocAws) getSecretID(projectName, name string) string { func (b *ByocAws) PutConfig(ctx context.Context, secret *defangv1.PutConfigRequest) 
error { fqn := b.getSecretID(secret.Project, secret.Name) - term.Debugf("Putting parameter %q", fqn) + slog.Debug("Putting parameter", "fqn", fqn) err := b.driver.PutSecret(ctx, fqn, secret.Value) return AnnotateAwsError(err) } func (b *ByocAws) ListConfig(ctx context.Context, req *defangv1.ListConfigsRequest) (*defangv1.Secrets, error) { prefix := b.getSecretID(req.Project, "") - term.Debugf("Listing parameters with prefix %q", prefix) + slog.Debug("Listing parameters with prefix", "prefix", prefix) awsSecrets, err := b.driver.ListSecretsByPrefix(ctx, prefix) if err != nil { return nil, err @@ -696,7 +697,7 @@ func (b *ByocAws) QueryLogs(ctx context.Context, req *defangv1.TailRequest) (ite // if the cloud formation stack has been destroyed, we can still query // logs for builds and services if err := b.driver.FillOutputs(ctx); err != nil { - term.Warnf("Unable to show CD logs: %v", err) // TODO: could skip this warning if the user wasn't asking for CD logs + slog.WarnContext(ctx, fmt.Sprintf("Unable to show CD logs: %v", err)) // TODO: could skip this warning if the user wasn't asking for CD logs } cfg, err := b.driver.LoadConfig(ctx) @@ -739,7 +740,7 @@ func (b *ByocAws) QueryLogs(ctx context.Context, req *defangv1.TailRequest) (ite // Ignore ResourceNotFoundException errors which can only happen if a log stream is missing during Query var resourceNotFound *cwTypes.ResourceNotFoundException if errors.As(err, &resourceNotFound) { - term.Debugf("Log stream not found while tailing, skipping: %v", err) + slog.Debug("Log stream not found while tailing, skipping", "err", err) continue } if !yield(nil, AnnotateAwsError(err)) { @@ -817,7 +818,7 @@ func (b *ByocAws) queryOrTailLogs(ctx context.Context, cwClient cw.LogsClient, r if len(req.Services) == 0 { albIter, err := b.fetchAndStreamAlbLogs(ctx, req.Project, start, end, req.Pattern) if err != nil { - term.Debugf("Failed to fetch ALB logs: %v", err) + slog.Debug("Failed to fetch ALB logs", "err", err) } else { logSeq = 
cw.MergeLogEvents(logSeq, albIter) if req.Limit > 0 { @@ -850,7 +851,7 @@ func (b *ByocAws) getLogGroupInputs(etag types.ETag, projectName, service, filte // Tail CD and builds if logType.Has(logs.LogTypeCD) { if b.driver.LogGroupARN == "" { - term.Debug("CD stack LogGroupARN is not set; skipping CD logs") + slog.Debug("CD stack LogGroupARN is not set; skipping CD logs") } else { cdTail := cw.LogGroupInput{LogGroupARN: b.driver.LogGroupARN, LogEventFilterPattern: pattern} // If we know the CD task ARN, only tail the logstream for that CD task; FIXME: store the task ID in the project's ProjectUpdate in S3 and use that @@ -858,15 +859,15 @@ func (b *ByocAws) getLogGroupInputs(etag types.ETag, projectName, service, filte cdTail.LogStreamNames = []string{awscodebuild.GetLogStreamForBuildID(b.cdBuildId)} } groups = append(groups, cdTail) - term.Debug("Query CD logs", cdTail.LogGroupARN, cdTail.LogStreamNames, filter) + slog.Debug(fmt.Sprint("Query CD logs", cdTail.LogGroupARN, cdTail.LogStreamNames, filter)) } } if logType.Has(logs.LogTypeBuild) && projectName != "" { buildsTail := cw.LogGroupInput{LogGroupARN: b.makeLogGroupARN(b.StackDir(projectName, "builds")), LogEventFilterPattern: pattern} // must match logic in ecs/common.ts; TODO: filter by etag/service - term.Debug("Query builds logs", buildsTail.LogGroupARN, filter) + slog.Debug(fmt.Sprint("Query builds logs", buildsTail.LogGroupARN, filter)) groups = append(groups, buildsTail) ecsTail := cw.LogGroupInput{LogGroupARN: b.makeLogGroupARN(b.StackDir(projectName, "ecs")), LogEventFilterPattern: pattern} // must match logic in ecs/common.ts; TODO: filter by etag/service/deploymentId - term.Debug("Query ecs events logs", ecsTail.LogGroupARN, filter) + slog.Debug(fmt.Sprint("Query ecs events logs", ecsTail.LogGroupARN, filter)) groups = append(groups, ecsTail) } // Tail services @@ -875,7 +876,7 @@ func (b *ByocAws) getLogGroupInputs(etag types.ETag, projectName, service, filte if service != "" && etag != "" { 
servicesTail.LogStreamNamePrefix = service + "/" + service + "_" + etag } - term.Debug("Query services logs", servicesTail.LogGroupARN, servicesTail.LogStreamNamePrefix, pattern) + slog.Debug(fmt.Sprint("Query services logs", servicesTail.LogGroupARN, servicesTail.LogStreamNamePrefix, pattern)) groups = append(groups, servicesTail) } return groups @@ -902,7 +903,7 @@ func (b *ByocAws) UpdateServiceInfo(ctx context.Context, si *defangv1.ServiceInf } func (b *ByocAws) TearDownCD(ctx context.Context) error { - term.Warn("Deleting the Defang CD cluster; currently existing stacks or configs will not be deleted, but they will be orphaned and they will need to be cleaned up manually") + slog.WarnContext(ctx, "Deleting the Defang CD cluster; currently existing stacks or configs will not be deleted, but they will be orphaned and they will need to be cleaned up manually") return b.driver.TearDown(ctx) } @@ -933,7 +934,7 @@ func (b *ByocAws) DeleteConfig(ctx context.Context, secrets *defangv1.Secrets) e for i, name := range secrets.Names { ids[i] = b.getSecretID(secrets.Project, name) } - term.Debug("Deleting parameters", ids) + slog.Debug(fmt.Sprint("Deleting parameters", ids)) if err := b.driver.DeleteSecrets(ctx, ids...); err != nil { return AnnotateAwsError(err) } @@ -962,7 +963,7 @@ func (b *ByocAws) CdList(ctx context.Context, allRegions bool) (iter.Seq[state.I func (b *ByocAws) Subscribe(ctx context.Context, req *defangv1.SubscribeRequest) (iter.Seq2[*defangv1.SubscribeResponse, error], error) { if err := b.driver.FillOutputs(ctx); err != nil { - term.Warnf("Unable to get log group ARNs: %v", err) + slog.WarnContext(ctx, fmt.Sprintf("Unable to get log group ARNs: %v", err)) } cfg, err := b.driver.LoadConfig(ctx) diff --git a/src/pkg/cli/client/byoc/aws/byoc_test.go b/src/pkg/cli/client/byoc/aws/byoc_test.go index 11a3c7e6e..933ec67be 100644 --- a/src/pkg/cli/client/byoc/aws/byoc_test.go +++ b/src/pkg/cli/client/byoc/aws/byoc_test.go @@ -8,6 +8,7 @@ import ( 
"encoding/json" "fmt" "io" + "log/slog" "os" "path/filepath" "strings" @@ -360,6 +361,9 @@ aws_secret_access_key = wJalrXUtnFEMI/KDEFANG/bPxRfiCYEXAMPLEKEY for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { stdout, _ := term.SetupTestTerm(t) + prevLogger := slog.Default() + t.Cleanup(func() { slog.SetDefault(prevLogger) }) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) if tt.configFiles { // Point AWS SDK to our fake config files diff --git a/src/pkg/cli/client/byoc/aws/domain.go b/src/pkg/cli/client/byoc/aws/domain.go index 254595d43..d2d076436 100644 --- a/src/pkg/cli/client/byoc/aws/domain.go +++ b/src/pkg/cli/client/byoc/aws/domain.go @@ -3,11 +3,12 @@ package aws import ( "context" "errors" + "fmt" + "log/slog" "strings" "github.com/DefangLabs/defang/src/pkg/clouds/aws" "github.com/DefangLabs/defang/src/pkg/dns" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/aws/aws-sdk-go-v2/service/route53/types" ) @@ -27,7 +28,7 @@ func prepareDomainDelegation(ctx context.Context, projectDomain, projectName, st if !errors.Is(err, aws.ErrZoneNotFound) { return nil, "", err // TODO: we should not fail deployment if GetHostedZonesByName fails } - term.Debugf("Zone %q not found, delegation set will be created", projectDomain) + slog.Debug("Zone not found, delegation set will be created", "domain", projectDomain) } else { // Case 2: Get the NS records for the existing subdomain zone delegationSet, err = getOrCreateDelegationSetByZones(ctx, zones, projectName, stackName, r53Client) @@ -42,10 +43,10 @@ func prepareDomainDelegation(ctx context.Context, projectDomain, projectName, st // but this is acceptable because the next time the zone is deployed, we'll get the existing delegation set from the zone. 
delegationSet, err = findUsableDelegationSet(ctx, projectDomain, r53Client, resolverAt) if err != nil { - term.Warnf("Failed to find existing usable delegation set: %v, creating a new one", err) + slog.WarnContext(ctx, fmt.Sprintf("Failed to find existing usable delegation set: %v, creating a new one", err)) } if delegationSet != nil { - term.Debug("Reusing existing usable Route53 delegation set:", *delegationSet.Id) + slog.Debug(fmt.Sprint("Reusing existing usable Route53 delegation set:", *delegationSet.Id)) } else { delegationSet, err = createUsableDelegationSet(ctx, projectDomain, r53Client, resolverAt) if err != nil { @@ -58,7 +59,7 @@ func prepareDomainDelegation(ctx context.Context, projectDomain, projectName, st return nil, "", errors.New("no NS records found for the delegation set") // should not happen } if delegationSet.Id != nil { - term.Debug("Route53 delegation set ID:", *delegationSet.Id) + slog.Debug(fmt.Sprint("Route53 delegation set ID:", *delegationSet.Id)) delegationSetId = strings.TrimPrefix(*delegationSet.Id, "/delegationset/") } @@ -87,7 +88,7 @@ func findUsableDelegationSet(ctx context.Context, domain string, r53Client aws.R if len(hostedZones) >= 100 { // A delegation set can only be associated with up to 100 hosted zones by default // (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html#limits-api-entities-hosted-zones) - term.Debugf("Delegation set %q has reached the maximum number of hosted zones (100), skipping", *delegationSet.Id) + slog.Debug("Delegation set has reached the maximum number of hosted zones (100), skipping", "delegationSetId", *delegationSet.Id) continue } return &delegationSet, nil @@ -119,7 +120,7 @@ func createUsableDelegationSet(ctx context.Context, domain string, r53Client aws // up to 100 delegation sets can be created per account, failure is non-fatal // there is no direct actionable remedy for the user too. 
// TODO: find and reuse empty delegation sets to avoid hitting the limit - term.Debugf("Failed to delete conflicting delegation set %q: %v", *delegationSet.Id, err) + slog.Debug("Failed to delete conflicting delegation set", "delegationSetId", *delegationSet.Id, "err", err) } } else { return delegationSet, nil @@ -137,7 +138,7 @@ func nameServersHasConflict(ctx context.Context, nameServers []string, domains [ return false, err } else if len(records) > 0 { // Records found, meaning the NS server is conflicting - term.Debugf("Name server %q has conflicting records for domain %q: %v", nsServer, domain, records) + slog.Debug("Name server has conflicting records for domain", "nsServer", nsServer, "domain", domain, "records", records) return true, nil } } @@ -155,7 +156,7 @@ func getOrCreateDelegationSetByZones(ctx context.Context, zones []*types.HostedZ } // Ignore zones that were created by an older CLI (2a), or another way (2c) or belong to a different project/stack (2d) if tags["defang:project"] != projectName || tags["defang:stack"] != stackName { - term.Debugf("ignored zone %q as it belongs to a different project/stack (%q/%q), skipping", projectDomain, tags["defang:project"], tags["defang:stack"]) + slog.Debug("ignored zone as it belongs to a different project/stack, skipping", "domain", projectDomain, "project", tags["defang:project"], "stack", tags["defang:stack"]) continue } @@ -164,7 +165,7 @@ func getOrCreateDelegationSetByZones(ctx context.Context, zones []*types.HostedZ // Create or get the reusable delegation set for the existing subdomain zone delegationSet, err = aws.CreateDelegationSet(ctx, zone.Id, r53Client) if delegationSetAlreadyReusable := new(types.DelegationSetAlreadyReusable); errors.As(err, &delegationSetAlreadyReusable) { - term.Debug("Route53 delegation set already created:", err) + slog.Debug(fmt.Sprint("Route53 delegation set already created:", err)) delegationSet, err = aws.GetDelegationSetByZone(ctx, zone.Id, r53Client) } if err != nil { 
diff --git a/src/pkg/cli/client/byoc/aws/list.go b/src/pkg/cli/client/byoc/aws/list.go index 67462cca9..b346399d9 100644 --- a/src/pkg/cli/client/byoc/aws/list.go +++ b/src/pkg/cli/client/byoc/aws/list.go @@ -2,15 +2,16 @@ package aws import ( "context" + "fmt" "io" "iter" + "log/slog" "strings" "sync" "github.com/DefangLabs/defang/src/pkg/cli/client/byoc" "github.com/DefangLabs/defang/src/pkg/cli/client/byoc/state" "github.com/DefangLabs/defang/src/pkg/clouds/aws" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/aws/aws-sdk-go-v2/service/s3" s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" ) @@ -61,7 +62,7 @@ type S3Client interface { func ListPulumiStacks(ctx context.Context, s3client S3Client, bucketName string) (iter.Seq[state.PulumiState], error) { prefix := `.pulumi/stacks/` // TODO: should we filter on `projectName`? - term.Debug("Listing stacks in bucket:", bucketName) + slog.Debug(fmt.Sprint("Listing stacks in bucket:", bucketName)) out, err := s3client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{ Bucket: &bucketName, Prefix: &prefix, @@ -85,7 +86,7 @@ func ListPulumiStacks(ctx context.Context, s3client S3Client, bucketName string) return io.ReadAll(getObjectOutput.Body) }) if err != nil { - term.Debugf("Skipping %q in bucket %s: %v", *obj.Key, bucketName, AnnotateAwsError(err)) + slog.Debug("Skipping object in bucket", "key", *obj.Key, "bucket", bucketName, "err", AnnotateAwsError(err)) continue } if state != nil { @@ -127,7 +128,7 @@ func (b *ByocAws) listPulumiStacksAllRegions(ctx context.Context, s3client S3Cli Bucket: bucket.Name, }) if err != nil { - term.Debugf("Skipping bucket %s: failed to get location: %v", *bucket.Name, AnnotateAwsError(err)) + slog.Debug("Skipping bucket: failed to get location", "bucket", *bucket.Name, "err", AnnotateAwsError(err)) continue } diff --git a/src/pkg/cli/client/byoc/aws/stream.go b/src/pkg/cli/client/byoc/aws/stream.go index af4814524..e78146cb5 100644 --- a/src/pkg/cli/client/byoc/aws/stream.go +++ 
b/src/pkg/cli/client/byoc/aws/stream.go @@ -2,6 +2,7 @@ package aws import ( "encoding/json" + "log/slog" "regexp" "slices" "strings" @@ -11,7 +12,6 @@ import ( "github.com/DefangLabs/defang/src/pkg/clouds/aws/cw" "github.com/DefangLabs/defang/src/pkg/clouds/aws/ecs" "github.com/DefangLabs/defang/src/pkg/logs" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/types" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" "google.golang.org/protobuf/types/known/timestamppb" @@ -90,7 +90,7 @@ func (p *logEventParser) parseEvents(events []cw.LogEvent) *defangv1.TailRespons } break } - term.Debugf("unrecognized log stream format: %s", *first.LogStreamName) + slog.Debug("unrecognized log stream format: " + *first.LogStreamName) return nil // skip, ignore sidecar logs (like route53-sidecar or fluentbit) } @@ -123,7 +123,7 @@ func (p *logEventParser) parseEvents(events []cw.LogEvent) *defangv1.TailRespons } else if parseECSEventRecords { evt, err := ecs.ParseECSEvent([]byte(*event.Message)) if err != nil { - term.Debugf("error parsing ECS event, output raw event log: %v", err) + slog.Debug("error parsing ECS event, output raw event log", "err", err) } else { entry.Service = evt.Service() entry.Etag = evt.Etag() diff --git a/src/pkg/cli/client/byoc/aws/subscribe.go b/src/pkg/cli/client/byoc/aws/subscribe.go index 9d5aa29aa..32a1606fe 100644 --- a/src/pkg/cli/client/byoc/aws/subscribe.go +++ b/src/pkg/cli/client/byoc/aws/subscribe.go @@ -2,13 +2,13 @@ package aws import ( "iter" + "log/slog" "slices" "strings" "github.com/DefangLabs/defang/src/pkg/clouds/aws/codebuild" "github.com/DefangLabs/defang/src/pkg/clouds/aws/cw" "github.com/DefangLabs/defang/src/pkg/clouds/aws/ecs" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/types" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" ) @@ -54,7 +54,7 @@ func parseSubscribeEvent(evt cw.LogEvent, etag types.ETag, services []string) *d func 
parseECSSubscribeEvent(evt cw.LogEvent, etag types.ETag, services []string) *defangv1.SubscribeResponse { ecsEvt, err := ecs.ParseECSEvent([]byte(*evt.Message)) if err != nil { - term.Debugf("error parsing ECS event: %v", err) + slog.Debug("error parsing ECS event", "err", err) return nil } diff --git a/src/pkg/cli/client/byoc/baseclient.go b/src/pkg/cli/client/byoc/baseclient.go index ca816b12d..3cd2bb5b3 100644 --- a/src/pkg/cli/client/byoc/baseclient.go +++ b/src/pkg/cli/client/byoc/baseclient.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "iter" + "log/slog" "strings" "github.com/DefangLabs/defang/src/pkg" @@ -13,7 +14,6 @@ import ( "github.com/DefangLabs/defang/src/pkg/cli/compose" "github.com/DefangLabs/defang/src/pkg/dns" "github.com/DefangLabs/defang/src/pkg/stacks" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/types" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" composeTypes "github.com/compose-spec/compose-go/v2/types" @@ -109,7 +109,7 @@ func (b *ByocBaseClient) RemoteProjectName(ctx context.Context) (string, error) if len(projectNames) > 1 { return "", ErrMultipleProjects{ProjectNames: projectNames} } - term.Debug("Using default project:", projectNames[0]) + slog.Debug(fmt.Sprint("Using default project:", projectNames[0])) return projectNames[0], nil } diff --git a/src/pkg/cli/client/byoc/common.go b/src/pkg/cli/client/byoc/common.go index 224c628c2..aaf8c9d93 100644 --- a/src/pkg/cli/client/byoc/common.go +++ b/src/pkg/cli/client/byoc/common.go @@ -3,13 +3,14 @@ package byoc import ( "context" "errors" + "fmt" + "log/slog" "os" "os/exec" "path/filepath" "strings" "github.com/DefangLabs/defang/src/pkg" - "github.com/DefangLabs/defang/src/pkg/term" ) const ( @@ -44,7 +45,7 @@ func GetPulumiBackend(stateUrl string) (string, string, error) { } func runLocalCommand(ctx context.Context, dir string, env []string, cmd ...string) error { - term.Debug("Running local command `", cmd, "` in dir ", dir) + 
slog.Debug(fmt.Sprint("Running local command `", cmd, "` in dir ", dir)) // TODO - use enums to define commands instead of passing strings down from the caller // #nosec G204 command := exec.CommandContext(ctx, cmd[0], cmd[1:]...) @@ -58,7 +59,7 @@ func runLocalCommand(ctx context.Context, dir string, env []string, cmd ...strin func DebugPulumiNodeJS(ctx context.Context, env []string, cmd ...string) error { // Locally we use the "dev" script from package.json to run Pulumi commands, which uses ts-node localCmd := append([]string{"npm", "run", "dev"}, cmd...) - term.Debug(strings.Join(append(env, localCmd...), " ")) + slog.Debug(strings.Join(append(env, localCmd...), " ")) dir := os.Getenv("DEFANG_PULUMI_DIR") if dir == "" { @@ -79,7 +80,7 @@ func DebugPulumiNodeJS(ctx context.Context, env []string, cmd ...string) error { func DebugPulumiGolang(ctx context.Context, env []string, cmd ...string) error { localCmd := append([]string{"go", "run", "./..."}, cmd...) - term.Debug(strings.Join(append(env, localCmd...), " ")) + slog.Debug(strings.Join(append(env, localCmd...), " ")) dir := os.Getenv("DEFANG_PULUMI_DIR") if dir == "" { diff --git a/src/pkg/cli/client/byoc/do/byoc.go b/src/pkg/cli/client/byoc/do/byoc.go index e23d183e3..4bdd0b306 100644 --- a/src/pkg/cli/client/byoc/do/byoc.go +++ b/src/pkg/cli/client/byoc/do/byoc.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "io" + "log/slog" "iter" "net/url" @@ -109,7 +110,7 @@ func (b *ByocDo) GetProjectUpdate(ctx context.Context, projectName string) (*def if err != nil { if aws.IsS3NoSuchKeyError(err) { - term.Debug("s3.GetObject:", err) + slog.Debug(fmt.Sprint("s3.GetObject:", err)) return nil, client.ErrNotExist // no services yet } return nil, awsbyoc.AnnotateAwsError(err) @@ -426,7 +427,7 @@ func (b *ByocDo) QueryLogs(ctx context.Context, req *defangv1.TailRequest) (iter if deploymentID == "" || appID == "" { //Look up the CD app directly instead of relying on the etag - term.Debug("Fetching app and deployment ID for app", 
appPlatform.CdName) + slog.Debug(fmt.Sprint("Fetching app and deployment ID for app", appPlatform.CdName)) cdApp, err := b.getAppByName(ctx, appPlatform.CdName) if err != nil { return nil, err @@ -446,7 +447,7 @@ func (b *ByocDo) QueryLogs(ctx context.Context, req *defangv1.TailRequest) (iter return nil, errors.New("no deployments found") } - term.Info("Waiting for CD command to finish gathering logs") + slog.InfoContext(ctx, "Waiting for CD command to finish gathering logs") for { deploymentInfo, _, err := b.client.Apps.GetDeployment(ctx, appID, deploymentID) if err != nil { @@ -455,7 +456,7 @@ func (b *ByocDo) QueryLogs(ctx context.Context, req *defangv1.TailRequest) (iter logType := logs.LogType(req.LogType) - term.Debugf("Deployment phase: %s", deploymentInfo.GetPhase()) + slog.Debug("Deployment phase", "phase", deploymentInfo.GetPhase()) switch deploymentInfo.GetPhase() { case godo.DeploymentPhase_PendingBuild, godo.DeploymentPhase_PendingDeploy, godo.DeploymentPhase_Deploying: // Do nothing; check again in 10 seconds @@ -496,7 +497,7 @@ func (b *ByocDo) QueryLogs(ctx context.Context, req *defangv1.TailRequest) (iter } func (b *ByocDo) TearDownCD(ctx context.Context) error { - term.Warn("Deleting the Defang CD app; currently existing stacks or configs will not be deleted, but they will be orphaned and they will need to be cleaned up manually") + slog.WarnContext(ctx, "Deleting the Defang CD app; currently existing stacks or configs will not be deleted, but they will be orphaned and they will need to be cleaned up manually") app, err := b.getAppByName(ctx, appPlatform.CdName) if err != nil { return err @@ -699,7 +700,7 @@ func (b *ByocDo) SetUpCD(ctx context.Context, force bool) error { if resp.StatusCode != 404 { return err } - term.Debug("Creating new registry") + slog.Debug("Creating new registry") // Create registry if it doesn't exist registry, _, err = b.client.Registry.Create(ctx, &godo.RegistryCreateRequest{ Name: pkg.RandomID(), // has to be globally 
unique diff --git a/src/pkg/cli/client/byoc/gcp/byoc.go b/src/pkg/cli/client/byoc/gcp/byoc.go index 47445ff96..b9431f7ee 100644 --- a/src/pkg/cli/client/byoc/gcp/byoc.go +++ b/src/pkg/cli/client/byoc/gcp/byoc.go @@ -6,6 +6,7 @@ import ( "encoding/base64" "errors" "fmt" + "log/slog" "iter" "os" @@ -174,7 +175,7 @@ func (b *ByocGcp) SetUpCD(ctx context.Context, force bool) error { } // TODO: Handle project creation flow - term.Infof("Setting up defang CD in GCP project %s, this could take a few minutes", b.driver.GetProjectID()) + slog.InfoContext(ctx, fmt.Sprintf("Setting up defang CD in GCP project %s, this could take a few minutes", b.driver.GetProjectID())) // 1. Enable required APIs // TODO: enable minimum APIs needed for bootstrap the cd image, let CD enable the rest of the APIs apis := []string{ @@ -282,7 +283,7 @@ func (b *ByocGcp) SetUpCD(ctx context.Context, force bool) error { } } - term.Debugf("Using CD image: %q", b.CDImage) + slog.Debug("Using CD image", "image", b.CDImage) b.SetupDone = true return nil @@ -310,7 +311,7 @@ func (b *ByocGcp) CdList(ctx context.Context, _allRegions bool) (iter.Seq[state. prefix := `.pulumi/stacks/` // TODO: should we filter on `projectName`? uploadSA := b.driver.GetServiceAccountEmail(DefangUploadServiceAccountName) - term.Debug("Getting services from pulumi stacks bucket:", bucketName, prefix, uploadSA) + slog.Debug(fmt.Sprint("Getting services from pulumi stacks bucket:", bucketName, prefix, uploadSA)) objLoader := func(ctx context.Context, bucket, object string) ([]byte, error) { return b.driver.GetBucketObjectWithServiceAccount(ctx, bucket, object, uploadSA) } @@ -321,12 +322,12 @@ func (b *ByocGcp) CdList(ctx context.Context, _allRegions bool) (iter.Seq[state. 
return func(yield func(state.Info) bool) { for obj, err := range seq { if err != nil { - term.Debugf("Error listing object in bucket %s: %v", bucketName, annotateGcpError(err)) + slog.Debug("Error listing object in bucket", "bucket", bucketName, "err", annotateGcpError(err)) continue } st, err := state.ParsePulumiStateFile(ctx, gcpObj{obj}, bucketName, objLoader) if err != nil { - term.Debugf("Skipping %q in bucket %s: %v", obj.Name, bucketName, annotateGcpError(err)) + slog.Debug("Skipping object in bucket", "object", obj.Name, "bucket", bucketName, "err", annotateGcpError(err)) continue } if st == nil { @@ -487,7 +488,7 @@ func (b *ByocGcp) runCdCommand(ctx context.Context, cmd cdCommand) (string, erro if err != nil { return "", err } - term.Debugf("Starting CD in cloudbuild at: %v", time.Now().Format(time.RFC3339)) + slog.Debug("Starting CD in cloudbuild", "at", time.Now().Format(time.RFC3339)) buildId, err := b.driver.RunCloudBuild(ctx, gcp.CloudBuildArgs{ Steps: string(steps), ServiceAccount: &b.cdServiceAccount, @@ -690,7 +691,7 @@ func (e ConflictDelegateDomainError) Error() string { } func (b *ByocGcp) PrepareDomainDelegation(ctx context.Context, req client.PrepareDomainDelegationRequest) (*client.PrepareDomainDelegationResponse, error) { - term.Debugf("Preparing domain delegation for %s", req.DelegateDomain) + slog.Debug("Preparing domain delegation for " + req.DelegateDomain) name := "defang-" + dns.SafeLabel(req.DelegateDomain) if zone, err := b.driver.EnsureDNSZoneExists(ctx, name, req.DelegateDomain, "defang delegate domain"); err != nil { if apiErr := new(googleapi.Error); errors.As(err, &apiErr) { @@ -710,7 +711,7 @@ func (b *ByocGcp) PrepareDomainDelegation(ctx context.Context, req client.Prepar return nil, annotateGcpError(err) } else { b.delegateDomainZone = zone.Name - term.Debugf("Zone %s created with nameservers %v", zone.Name, zone.NameServers) + slog.Debug("Zone created with nameservers", "zone", zone.Name, "nameservers", zone.NameServers) 
return &client.PrepareDomainDelegationResponse{ NameServers: zone.NameServers, }, nil @@ -720,7 +721,7 @@ func (b *ByocGcp) PrepareDomainDelegation(ctx context.Context, req client.Prepar func (b *ByocGcp) DeleteConfig(ctx context.Context, req *defangv1.Secrets) error { for _, name := range req.Names { secretId := b.resourceName(req.Project, name) - term.Debugf("Deleting secret %q", secretId) + slog.Debug("Deleting secret", "secretId", secretId) if err := b.driver.DeleteSecret(ctx, secretId); err != nil { return fmt.Errorf("failed to delete secret %q: %w", secretId, err) } @@ -749,7 +750,7 @@ func (b *ByocGcp) ListConfig(ctx context.Context, req *defangv1.ListConfigsReque func (b *ByocGcp) PutConfig(ctx context.Context, req *defangv1.PutConfigRequest) error { secretId := b.resourceName(req.Project, req.Name) - term.Debugf("Creating secret %q", secretId) + slog.Debug("Creating secret", "secretId", secretId) if _, err := b.driver.CreateSecret(ctx, secretId); err != nil { if gcp.IsAccessNotEnabled(err) { @@ -760,13 +761,13 @@ func (b *ByocGcp) PutConfig(ctx context.Context, req *defangv1.PutConfigRequest) } if err != nil { if stat, ok := status.FromError(err); ok && stat.Code() == codes.AlreadyExists { - term.Debugf("Secret %q already exists", secretId) + slog.Debug("Secret already exists", "secretId", secretId) } else { return fmt.Errorf("failed to create secret %q: %w", secretId, err) } } } - term.Debugf("Adding a new secret version for %q", secretId) + slog.Debug("Adding a new secret version", "secretId", secretId) if _, err := b.driver.AddSecretVersion(ctx, secretId, []byte(req.Value)); err != nil { return fmt.Errorf("failed to add secret version for %q: %w", secretId, err) } @@ -821,7 +822,7 @@ func LogEntriesToString(logEntries []*loggingpb.LogEntry) string { } func (b *ByocGcp) TearDownCD(ctx context.Context) error { - // term.Warn("Deleting Defang CD; currently existing stacks or configs will not be deleted, but they will be orphaned and they will need to be 
cleaned up manually") + // slog.Warn("Deleting Defang CD; currently existing stacks or configs will not be deleted, but they will be orphaned and they will need to be cleaned up manually") // FIXME: implement return client.ErrNotImplemented("GCP TearDown") } @@ -842,10 +843,10 @@ func (b *ByocGcp) GetProjectUpdate(ctx context.Context, projectName string) (*de // Current user might not have object viewer access to the bucket, use the upload service account to get the object uploadSA := b.driver.GetServiceAccountEmail(DefangUploadServiceAccountName) - term.Debug("Getting services from bucket:", bucketName, path, uploadSA) + slog.Debug(fmt.Sprint("Getting services from bucket:", bucketName, path, uploadSA)) pbBytes, err := b.driver.GetBucketObjectWithServiceAccount(ctx, bucketName, path, uploadSA) if err != nil { - term.Debugf("Failed to get project bucket object from bucket %q at path %q with service account %q: %v", bucketName, path, uploadSA, err) + slog.Debug("Failed to get project bucket object", "bucket", bucketName, "path", path, "serviceAccount", uploadSA, "err", err) // Handle the case where the object does not exist, or where we do not have permission to view the object, ie. // "Permission 'iam.serviceAccounts.getAccessToken' denied on resource (or it may not exist)." 
#2051 if errors.Is(err, gcp.ErrObjectNotExist) || strings.Contains(err.Error(), "(or it may not exist)") { diff --git a/src/pkg/cli/client/byoc/gcp/stream.go b/src/pkg/cli/client/byoc/gcp/stream.go index 6e3de50b8..13d4715fd 100644 --- a/src/pkg/cli/client/byoc/gcp/stream.go +++ b/src/pkg/cli/client/byoc/gcp/stream.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "iter" + "log/slog" "path" "regexp" "strings" @@ -14,7 +15,6 @@ import ( "cloud.google.com/go/logging/apiv2/loggingpb" "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/clouds/gcp" - "github.com/DefangLabs/defang/src/pkg/term" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" auditpb "google.golang.org/genproto/googleapis/cloud/audit" "google.golang.org/grpc/codes" @@ -71,7 +71,7 @@ func (s *ServerStream[T]) Follow(start time.Time) (iter.Seq2[*T, error], error) } query := s.query.GetQuery() shouldList := !start.IsZero() && start.Unix() > 0 && time.Since(start) > 10*time.Millisecond - term.Debugf("Query and tail logs since %v with query: \n%v", start, query) + slog.Debug("Query and tail logs", "since", start, "query", query) return func(yield func(*T, error) bool) { defer tailer.Close() // Only query older logs if start time is more than 10ms ago @@ -126,7 +126,7 @@ func (s *ServerStream[T]) Follow(start time.Time) (iter.Seq2[*T, error], error) // Head returns an iterator that queries logs in ascending order. func (s *ServerStream[T]) Head(limit int32) iter.Seq2[*T, error] { query := s.query.GetQuery() - term.Debugf("Query logs with query: \n%v", query) + slog.Debug("Query logs", "query", query) return func(yield func(*T, error) bool) { lister, err := s.gcpLogsClient.ListLogEntries(s.ctx, query, gcp.OrderAscending) if err != nil { @@ -140,7 +140,7 @@ func (s *ServerStream[T]) Head(limit int32) iter.Seq2[*T, error] { // Tail returns an iterator that queries logs in descending order, reversing if a limit is set. 
func (s *ServerStream[T]) Tail(limit int32) iter.Seq2[*T, error] { query := s.query.GetQuery() - term.Debugf("Query logs with query: \n%v", query) + slog.Debug("Query logs", "query", query) return func(yield func(*T, error) bool) { lister, err := s.gcpLogsClient.ListLogEntries(s.ctx, query, gcp.OrderDescending) if err != nil { @@ -486,13 +486,13 @@ func getActivityParser(ctx context.Context, gcpLogsClient GcpLogsClient, waitFor } if entry.GetProtoPayload().GetTypeUrl() != "type.googleapis.com/google.cloud.audit.AuditLog" { - term.Warnf("unexpected log entry type : %v", entry.GetProtoPayload().GetTypeUrl()) + slog.WarnContext(ctx, fmt.Sprintf("unexpected log entry type : %v", entry.GetProtoPayload().GetTypeUrl())) return nil, nil } auditLog := new(auditpb.AuditLog) if err := entry.GetProtoPayload().UnmarshalTo(auditLog); err != nil { - term.Warnf("failed to unmarshal audit log : %v", err) + slog.WarnContext(ctx, fmt.Sprintf("failed to unmarshal audit log : %v", err)) return nil, nil } @@ -528,7 +528,7 @@ func getActivityParser(ctx context.Context, gcpLogsClient GcpLogsClient, waitFor Status: status.GetMessage(), }}, nil } else { - term.Warnf("missing request and response in audit log for service %v", path.Base(auditLog.GetResourceName())) + slog.WarnContext(ctx, fmt.Sprintf("missing request and response in audit log for service %v", path.Base(auditLog.GetResourceName()))) return nil, nil } @@ -551,7 +551,7 @@ func getActivityParser(ctx context.Context, gcpLogsClient GcpLogsClient, waitFor serviceName := GetValueInStruct(response, "spec.template.metadata.labels.defang-service") status := auditLog.GetStatus() if status == nil { - term.Warnf("missing status in audit log for job %v", path.Base(auditLog.GetResourceName())) + slog.WarnContext(ctx, fmt.Sprintf("missing status in audit log for job %v", path.Base(auditLog.GetResourceName()))) return nil, nil } var state defangv1.ServiceState @@ -579,7 +579,7 @@ func getActivityParser(ctx context.Context, gcpLogsClient 
GcpLogsClient, waitFor // Report all ready services when CD is successful, prevents cli deploy stop before cd is done return getReadyServicesCompletedResps(auditLog.GetStatus().GetMessage()), nil // Ignore success cd status when we are waiting for service status } else { - term.Warnf("unexpected execution name in audit log : %v", executionName) + slog.WarnContext(ctx, fmt.Sprintf("unexpected execution name in audit log : %v", executionName)) return nil, nil } case "gce_instance_group_manager": // Compute engine update start @@ -591,24 +591,24 @@ func getActivityParser(ctx context.Context, gcpLogsClient GcpLogsClient, waitFor managerName := entry.Resource.Labels["instance_group_manager_name"] labels, err := gcpLogsClient.GetInstanceGroupManagerLabels(ctx, project, region, managerName) if err != nil { - term.Warnf("failed to get instance group manager labels for %v: %v", managerName, err) + slog.WarnContext(ctx, fmt.Sprintf("failed to get instance group manager labels for %v: %v", managerName, err)) return nil, nil } serviceName := labels["defang-service"] if serviceName == "" { - term.Warnf("missing defang-service label in instance group manager %v", managerName) + slog.WarnContext(ctx, fmt.Sprintf("missing defang-service label in instance group manager %v", managerName)) return nil, nil } if etag != "" { labelEtag := labels["defang-etag"] if labelEtag != etag { - term.Warnf("skipping instance group manager %v: etag mismatch (got %q, want %q)", managerName, labelEtag, etag) + slog.WarnContext(ctx, fmt.Sprintf("skipping instance group manager %v: etag mismatch (got %q, want %q)", managerName, labelEtag, etag)) return nil, nil } } rootTriggerId := entry.GetLabels()["compute.googleapis.com/root_trigger_id"] if rootTriggerId == "" { - term.Warnf("missing root_trigger_id in audit log for instance group manager %v", path.Base(auditLog.GetResourceName())) + slog.WarnContext(ctx, fmt.Sprintf("missing root_trigger_id in audit log for instance group manager %v", 
path.Base(auditLog.GetResourceName()))) } else { computeEngineRootTriggers[rootTriggerId] = serviceName } @@ -622,12 +622,12 @@ func getActivityParser(ctx context.Context, gcpLogsClient GcpLogsClient, waitFor rootTriggerId := entry.GetLabels()["compute.googleapis.com/root_trigger_id"] serviceName, ok := computeEngineRootTriggers[rootTriggerId] if !ok { - term.Debugf("ignored root trigger id %v for instance group insert", rootTriggerId) + slog.Debug("ignored root trigger id for instance group insert", "rootTriggerId", rootTriggerId) return nil, nil } response := auditLog.GetResponse() if response == nil { - term.Warnf("missing response in audit log for instance group %v", path.Base(auditLog.GetResourceName())) + slog.WarnContext(ctx, fmt.Sprintf("missing response in audit log for instance group %v", path.Base(auditLog.GetResourceName()))) return nil, nil } status := response.GetFields()["status"].GetStringValue() @@ -653,7 +653,7 @@ func getActivityParser(ctx context.Context, gcpLogsClient GcpLogsClient, waitFor } bt, err := gcpLogsClient.GetBuildInfo(ctx, buildId) // TODO: Cache the build IDs? 
if err != nil { - term.Warnf("failed to get build tag for build %v: %v", buildId, err) + slog.WarnContext(ctx, fmt.Sprintf("failed to get build tag for build %v: %v", buildId, err)) return nil, nil } @@ -707,7 +707,7 @@ func getActivityParser(ctx context.Context, gcpLogsClient GcpLogsClient, waitFor }}, nil } default: - term.Warnf("unexpected resource type : %v", entry.Resource.Type) + slog.WarnContext(ctx, fmt.Sprintf("unexpected resource type : %v", entry.Resource.Type)) return nil, nil } } diff --git a/src/pkg/cli/client/byoc/state/parse.go b/src/pkg/cli/client/byoc/state/parse.go index 76e19b37e..ee4b6480d 100644 --- a/src/pkg/cli/client/byoc/state/parse.go +++ b/src/pkg/cli/client/byoc/state/parse.go @@ -4,11 +4,11 @@ import ( "context" "encoding/json" "fmt" + "log/slog" "path" "strconv" "strings" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/types" ) @@ -86,12 +86,12 @@ func ParsePulumiStateFile(ctx context.Context, obj BucketObj, bucket string, obj Name: path.Base(stackFile), // legacy logic to derive stack name from file name } if state.Version != 3 { - term.Debug("Skipping Pulumi state with version", state.Version) + slog.Debug("Skipping Pulumi state", "version", state.Version) } else if len(state.Checkpoint.Latest.PendingOperations) > 0 { for _, op := range state.Checkpoint.Latest.PendingOperations { parts := strings.Split(op.Resource.Urn, "::") // prefix::project::type::resource => {urn:provider:stack}::{project}::{plugin:file:class}::{name} if len(parts) < 4 { - term.Debug("Skipping pending operation with malformed URN:", op.Resource.Urn) + slog.Debug("Skipping pending operation with malformed URN", "urn", op.Resource.Urn) continue } stack.Pending = append(stack.Pending, parts[3]) diff --git a/src/pkg/cli/client/caniuse.go b/src/pkg/cli/client/caniuse.go index eb9d5b58d..cb9a8b37e 100644 --- a/src/pkg/cli/client/caniuse.go +++ b/src/pkg/cli/client/caniuse.go @@ -3,9 +3,10 @@ package client import
( "context" "errors" + "fmt" + "log/slog" "os" - "github.com/DefangLabs/defang/src/pkg/term" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" ) @@ -65,24 +66,24 @@ type versionLabel string // resolveVersion picks the version to use: env override > force upgrade > allow upgrade > pin to previous > latest. func resolveVersion(fromEnv, fromFabric, previous string, label versionLabel, allowUpgrade bool, forcedReason string) string { if fromEnv != "" { - term.Debugf("Using %s from env: %s", label, fromEnv) + slog.Debug("Using version from env", "label", label, "version", fromEnv) return fromEnv } if previous == "" || fromFabric == previous { - term.Debugf("Using %s: %s", label, fromFabric) + slog.Debug("Using version from fabric", "label", label, "version", fromFabric) return fromFabric } if forcedReason != "" { - term.Debugf("Using %s from fabric: %s", label, fromFabric) - term.Warnf("Overriding %s: %s", label, forcedReason) + slog.Debug("Using version from fabric (forced)", "label", label, "version", fromFabric) + slog.Warn(fmt.Sprintf("Overriding %s: %s", label, forcedReason)) return fromFabric } if allowUpgrade { - term.Debugf("Using latest %s: %s", label, fromFabric) - term.Infof("Upgrading %s to latest", label) + slog.Debug("Using latest version from fabric", "label", label, "version", fromFabric) + slog.Info(fmt.Sprintf("Upgrading %s to latest", label)) return fromFabric } - term.Debugf("Using previous %s: %s", label, previous) - term.Warnf("A newer %s is available; using previously deployed version. To upgrade, re-run with --allow-upgrade or set DEFANG_ALLOW_UPGRADE=1", label) + slog.Debug("Using previous version", "label", label, "version", previous) + slog.Warn(fmt.Sprintf("A newer %s is available; using previously deployed version. 
To upgrade, re-run with --allow-upgrade or set DEFANG_ALLOW_UPGRADE=1", label)) return previous } diff --git a/src/pkg/cli/client/cluster.go b/src/pkg/cli/client/cluster.go index 174c85368..25f01daff 100644 --- a/src/pkg/cli/client/cluster.go +++ b/src/pkg/cli/client/cluster.go @@ -1,13 +1,13 @@ package client import ( + "log/slog" "net" "os" "path/filepath" "strings" "github.com/DefangLabs/defang/src/pkg" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/tokenstore" ) @@ -42,18 +42,18 @@ func GetExistingToken(fabricAddr string) string { var accessToken = os.Getenv("DEFANG_ACCESS_TOKEN") if accessToken != "" { - term.Debug("Using access token from env DEFANG_ACCESS_TOKEN") + slog.Debug("Using access token from env DEFANG_ACCESS_TOKEN") } else { var err error accessToken, err = TokenStore.Load(TokenStorageName(fabricAddr)) if err != nil { - term.Debugf("failed to load access token for %v: %v", fabricAddr, err) + slog.Debug("failed to load access token", "fabricAddr", fabricAddr, "err", err) } // Check if we wrote an IDToken file during login, if AWS_WEB_IDENTITY_TOKEN_FILE is empty, if os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE") == "" { if jwtPath, err := GetWebIdentityTokenFile(fabricAddr); err == nil { - term.Debugf("using web identity token from %s", jwtPath) + slog.Debug("using web identity token from " + jwtPath) // Set AWS env vars for this CLI invocation os.Setenv("AWS_WEB_IDENTITY_TOKEN_FILE", jwtPath) os.Setenv("AWS_ROLE_SESSION_NAME", "defang-cli") // TODO: from WhoAmI diff --git a/src/pkg/cli/client/grpc_logger.go b/src/pkg/cli/client/grpc_logger.go index a2b32c409..e38de4e58 100644 --- a/src/pkg/cli/client/grpc_logger.go +++ b/src/pkg/cli/client/grpc_logger.go @@ -3,11 +3,12 @@ package client import ( "context" "encoding/json" + "fmt" + "log/slog" "net/http" "connectrpc.com/connect" "github.com/DefangLabs/defang/src/pkg" - "github.com/DefangLabs/defang/src/pkg/term" ) const maxPayloadLength = 1024 @@ -38,7 +39,7 @@ func (g 
grpcLogger) logRequest(header http.Header, reqType, payload string) { requestId := pkg.RandomID() header.Set("X-Request-Id", requestId) - term.Debug(g.prefix, requestId, reqType, payload) + slog.Debug(fmt.Sprintf("%s %s %s %s", g.prefix, requestId, reqType, payload)) } func (g grpcLogger) WrapStreamingClient(next connect.StreamingClientFunc) connect.StreamingClientFunc { diff --git a/src/pkg/cli/client/playground.go b/src/pkg/cli/client/playground.go index 8ce49d51e..7efeb4ecb 100644 --- a/src/pkg/cli/client/playground.go +++ b/src/pkg/cli/client/playground.go @@ -3,14 +3,15 @@ package client import ( "context" "errors" + "fmt" "io" "iter" + "log/slog" "os" "connectrpc.com/connect" byocState "github.com/DefangLabs/defang/src/pkg/cli/client/byoc/state" "github.com/DefangLabs/defang/src/pkg/dns" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/types" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" ) @@ -183,7 +184,7 @@ func (g *PlaygroundProvider) RemoteProjectName(ctx context.Context) (string, err if resp.Project == "" { return "", errors.New("no Playground projects found") } - term.Debug("Using default Playground project: ", resp.Project) + slog.Debug(fmt.Sprint("Using default Playground project: ", resp.Project)) return resp.Project, nil } diff --git a/src/pkg/cli/client/pretty_error.go b/src/pkg/cli/client/pretty_error.go index ff17ca3a8..75d35f007 100644 --- a/src/pkg/cli/client/pretty_error.go +++ b/src/pkg/cli/client/pretty_error.go @@ -3,17 +3,17 @@ package client import ( "errors" "fmt" + "log/slog" "strings" "connectrpc.com/connect" - "github.com/DefangLabs/defang/src/pkg/term" ) func PrettyError(err error) error { // To avoid printing the internal gRPC error code var cerr *connect.Error if errors.As(err, &cerr) { - term.Debug("Server error:", cerr) + slog.Debug("Server error", "err", cerr) err = errors.Unwrap(cerr) } if IsNetworkError(err) { diff --git a/src/pkg/cli/client/projectName.go 
b/src/pkg/cli/client/projectName.go index 78958a68c..72e452377 100644 --- a/src/pkg/cli/client/projectName.go +++ b/src/pkg/cli/client/projectName.go @@ -3,8 +3,7 @@ package client import ( "context" "fmt" - - "github.com/DefangLabs/defang/src/pkg/term" + "log/slog" ) // Deprecated: should use stacks instead of ProjectName fallback. @@ -15,10 +14,10 @@ func LoadProjectNameWithFallback(ctx context.Context, loader Loader, provider Pr if err == nil { return projectName, nil } - term.Debug("Failed to load local project:", err) + slog.Debug("Failed to load local project", "err", err) loadErr = err } - term.Debug("Trying to get the remote project name from the provider") + slog.Debug("Trying to get the remote project name from the provider") projectName, err := provider.RemoteProjectName(ctx) if err != nil { return "", fmt.Errorf("%w and %w", loadErr, err) diff --git a/src/pkg/cli/common.go b/src/pkg/cli/common.go index 2b2163d66..07b58a996 100644 --- a/src/pkg/cli/common.go +++ b/src/pkg/cli/common.go @@ -3,6 +3,7 @@ package cli import ( "context" "encoding/json" + "log/slog" "os" "github.com/DefangLabs/defang/src/pkg/cli/client" @@ -91,7 +92,7 @@ func putDeploymentAndStack(ctx context.Context, provider client.Provider, fabric if err != nil { return err } - term.Debugf("Deployment origin metadata: %s", string(originMetadataBytes)) + slog.Debug("Deployment origin metadata: " + string(originMetadataBytes)) } return fabric.PutDeployment(ctx, &defangv1.PutDeploymentRequest{ diff --git a/src/pkg/cli/compose/baseimage.go b/src/pkg/cli/compose/baseimage.go index 4b10a0519..1ddf2385c 100644 --- a/src/pkg/cli/compose/baseimage.go +++ b/src/pkg/cli/compose/baseimage.go @@ -2,12 +2,12 @@ package compose import ( "fmt" + "log/slog" "maps" "os" "path/filepath" "slices" - "github.com/DefangLabs/defang/src/pkg/term" composeTypes "github.com/compose-spec/compose-go/v2/types" "github.com/moby/buildkit/frontend/dockerfile/instructions" 
"github.com/moby/buildkit/frontend/dockerfile/parser" @@ -25,7 +25,7 @@ func FindAllBaseImages(project *composeTypes.Project) ([]string, error) { images, err := extractDockerfileBaseImages(dockerfileFullPath) if err != nil { if os.IsNotExist(err) { - term.Debugf("service %q: dockerfile %q does not exist; skipping", service.Name, dockerfileFullPath) + slog.Debug("service: dockerfile does not exist; skipping", "service", service.Name, "dockerfile", dockerfileFullPath) continue } return nil, err diff --git a/src/pkg/cli/compose/compose_test.go b/src/pkg/cli/compose/compose_test.go index 7c3698ee9..a24336e8f 100644 --- a/src/pkg/cli/compose/compose_test.go +++ b/src/pkg/cli/compose/compose_test.go @@ -2,9 +2,11 @@ package compose import ( "bytes" + "log/slog" "os" "testing" + "github.com/DefangLabs/defang/src/pkg/logs" "github.com/DefangLabs/defang/src/pkg/term" ) @@ -189,6 +191,7 @@ func TestComposeGoNoDoubleWarningLog(t *testing.T) { var warnings bytes.Buffer term.DefaultTerm = term.NewTerm(os.Stdin, &warnings, &warnings) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) loader := NewLoader(WithPath("../../../testdata/compose-go-warn/compose.yaml")) _, err := loader.LoadProject(t.Context()) diff --git a/src/pkg/cli/compose/context.go b/src/pkg/cli/compose/context.go index 40d1106a1..eb3cc290f 100644 --- a/src/pkg/cli/compose/context.go +++ b/src/pkg/cli/compose/context.go @@ -11,6 +11,7 @@ import ( "fmt" "io" "io/fs" + "log/slog" "os" "path/filepath" "strings" @@ -219,7 +220,7 @@ func getRemoteBuildContext(ctx context.Context, provider client.Provider, projec return fmt.Sprintf("s3://cd-preview/%s%s", service, archiveType.Extension), nil } - term.Info("Packaging the project files for", service, "at", root) + slog.InfoContext(ctx, fmt.Sprintf("Packaging the project files for %s at %s", service, root)) buffer, err := createArchive(ctx, build.Context, build.Dockerfile, archiveType) if err != nil { return "", err @@ -230,7 +231,7 @@ func getRemoteBuildContext(ctx 
context.Context, provider client.Provider, projec case UploadModeDefault, UploadModeDigest: // Calculate the digest of the tarball and pass it to the fabric controller (to avoid building the same image twice) digest = calcDigest(buffer.Bytes()) - term.Debugf("Digest for %q: %s", service, digest) + slog.Debug("Digest for service", "service", service, "digest", digest) case UploadModePreview: // For preview, we invoke the CD "preview" command, which will want a valid (S3) URL for diff, even though it won't be used digest = calcDigest(buffer.Bytes()) @@ -241,7 +242,7 @@ func getRemoteBuildContext(ctx context.Context, provider client.Provider, projec panic("unexpected UploadMode value") } - term.Info("Uploading the project files for", service) + slog.InfoContext(ctx, fmt.Sprintf("Uploading the project files for %s", service)) return uploadArchive(ctx, provider, projectName, buffer, archiveType, digest) } @@ -297,7 +298,7 @@ func tryReadIgnoreFile(cwd, ignorefile string) io.ReadCloser { if err != nil { return nil } - term.Debug("Reading .dockerignore file from", ignorefile) + slog.Debug("Reading .dockerignore file from " + ignorefile) return reader } @@ -306,7 +307,7 @@ func tryReadIgnoreFile(cwd, ignorefile string) io.ReadCloser { // Write a default .dockerignore file if it doesn't exist. // Returns the filename of the written file and an error. 
func writeDefaultIgnoreFile(cwd string, dockerignore string) (string, error) { path := filepath.Join(cwd, dockerignore) - term.Debug("Writing .dockerignore file to", path) + slog.Debug("Writing .dockerignore file to " + path) err := os.WriteFile(path, []byte(defaultDockerIgnore), 0644) if err != nil { @@ -369,7 +370,7 @@ func walkContextFolder(root, dockerfile string, writeIgnore writeIgnoreFile, fn if dockerignore == "" && writeIgnore { // Generate a default .dockerignore file if none exists (to be included in the context) - term.Warn("No .dockerignore file found; creating default .dockerignore; you may add this to source control (git)") + slog.Warn("No .dockerignore file found; creating default .dockerignore; you may add this to source control (git)") var err error dockerignore, err = writeDefaultIgnoreFile(root, dotdockerignore) if err != nil { @@ -412,7 +413,7 @@ func walkContextFolder(root, dockerfile string, writeIgnore writeIgnoreFile, fn return err } if ignore { - term.Debug("Ignoring", relPath) // TODO: avoid printing in this function + slog.Debug("Ignoring " + relPath) // TODO: avoid printing in this function if de.IsDir() { return filepath.SkipDir } @@ -447,7 +448,7 @@ func createArchive(ctx context.Context, root string, dockerfile string, contentT doProgress := term.StdoutCanColor() && term.IsTerminal() err := walkContextFolder(root, dockerfile, writeIgnoreFileYes, func(path string, de os.DirEntry, slashPath string) error { if term.DoDebug() { - term.Debug("Adding", slashPath) + slog.Debug("Adding " + slashPath) } else if doProgress { term.Printf("%4d %s\r", fileCount, slashPath) defer term.ClearLine() @@ -474,7 +475,7 @@ func createArchive(ctx context.Context, root string, dockerfile string, contentT fileCount++ if fileCount == ContextFileLimit+1 { - term.Warnf("the build context contains more than %d files; use --debug or create .dockerignore to exclude caches and build artifacts", ContextFileLimit) + 
slog.WarnContext(ctx, fmt.Sprintf("the build context contains more than %d files; use --debug or create .dockerignore to exclude caches and build artifacts", ContextFileLimit)) } bufLen := buf.Len() @@ -483,7 +484,7 @@ func createArchive(ctx context.Context, root string, dockerfile string, contentT return fmt.Errorf("the build context is limited to %s; consider downloading large files in the Dockerfile or set the DEFANG_BUILD_CONTEXT_LIMIT environment variable", units.BytesSize(float64(ContextSizeHardLimit))) } if bufLen <= ContextSizeSoftLimit && buf.Len() > ContextSizeSoftLimit { - term.Warnf("the build context is larger than %s; use --debug or create .dockerignore to exclude caches and build artifacts", units.BytesSize(float64(buf.Len()))) + slog.WarnContext(ctx, fmt.Sprintf("the build context is larger than %s; use --debug or create .dockerignore to exclude caches and build artifacts", units.BytesSize(float64(buf.Len())))) } return err }) diff --git a/src/pkg/cli/compose/dockerfile_validator.go b/src/pkg/cli/compose/dockerfile_validator.go index 8f55ab967..d722936ea 100644 --- a/src/pkg/cli/compose/dockerfile_validator.go +++ b/src/pkg/cli/compose/dockerfile_validator.go @@ -3,11 +3,11 @@ package compose import ( "bytes" "fmt" + "log/slog" "os" "path/filepath" "strings" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/moby/buildkit/frontend/dockerfile/parser" ) @@ -35,7 +35,7 @@ func (e *DockerfileValidationError) Unwrap() error { // ValidateDockerfile validates the syntax and basic structure of a Dockerfile func ValidateDockerfile(dockerfilePath string, serviceName string) error { - term.Debugf("Validating Dockerfile: %s for service %q", dockerfilePath, serviceName) + slog.Debug("Validating Dockerfile for service", "dockerfile", dockerfilePath, "service", serviceName) // Read the Dockerfile content, err := os.ReadFile(dockerfilePath) @@ -124,7 +124,7 @@ func ValidateDockerfile(dockerfilePath string, serviceName string) error { } } // Log warnings but 
don't fail validation - term.Warnf("service %q: Dockerfile %q has warnings:\n %s", serviceName, dockerfilePath, strings.Join(warnings, "\n ")) + slog.Warn(fmt.Sprintf("service %q: Dockerfile %q has warnings:\n %s", serviceName, dockerfilePath, strings.Join(warnings, "\n "))) } return nil @@ -161,7 +161,7 @@ func ValidateServiceDockerfiles(project *Project) error { if os.IsNotExist(err) { // This might be handled later by Railpack or may be a remote context // Only validate if the file exists - term.Debugf("Skipping validation for service %q: Dockerfile %q does not exist", service.Name, dockerfilePath) + slog.Debug("Skipping validation for service: Dockerfile does not exist", "service", service.Name, "dockerfile", dockerfilePath) continue } errors = append(errors, &DockerfileValidationError{ diff --git a/src/pkg/cli/compose/fixup.go b/src/pkg/cli/compose/fixup.go index 615cae441..b6e4ce169 100644 --- a/src/pkg/cli/compose/fixup.go +++ b/src/pkg/cli/compose/fixup.go @@ -3,6 +3,7 @@ package compose import ( "context" "fmt" + "log/slog" "os" "path/filepath" "slices" @@ -11,7 +12,6 @@ import ( "github.com/DefangLabs/defang/src/pkg" "github.com/DefangLabs/defang/src/pkg/cli/client" - "github.com/DefangLabs/defang/src/pkg/term" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" composeTypes "github.com/compose-spec/compose-go/v2/types" ) @@ -32,14 +32,14 @@ func FixupServices(ctx context.Context, provider client.Provider, project *compo // Preload the current config so we can detect which environment variables should be passed as "secrets" config, err := provider.ListConfig(ctx, &defangv1.ListConfigsRequest{Project: project.Name}) if err != nil { - term.Debugf("failed to load config: %v", err) + slog.Debug("failed to load config", "err", err) config = &defangv1.Secrets{} } slices.Sort(config.Names) // sort for binary search accountInfo, err := provider.AccountInfo(ctx) if err != nil { - term.Debugf("failed to get account info to fixup services: %v", err) + 
slog.Debug("failed to get account info to fixup services", "err", err) accountInfo = &client.AccountInfo{} } @@ -83,7 +83,7 @@ func FixupServices(ctx context.Context, provider client.Provider, project *compo // Ignore "build" config if we have "image", unless in --build or --force mode if svccfg.Image != "" && svccfg.Build != nil && upload != UploadModeDigest && upload != UploadModeForce { - term.Warnf("service %q: using published image instead of rebuilding; pass --build to build and publish a new image", svccfg.Name) + slog.WarnContext(ctx, fmt.Sprintf("service %q: using published image instead of rebuilding; pass --build to build and publish a new image", svccfg.Name)) svccfg.Build = nil } @@ -107,7 +107,7 @@ func FixupServices(ctx context.Context, provider client.Provider, project *compo // Check if the dockerfile exists dockerfilePath := filepath.Join(svccfg.Build.Context, svccfg.Build.Dockerfile) if _, err := os.Stat(dockerfilePath); err != nil { - term.Debugf("stat %q: %v", dockerfilePath, err) + slog.Debug("stat dockerfile", "path", dockerfilePath, "err", err) // In this case we know that the dockerfile is not in the location the compose file specifies, // so can assume that the dockerfile has been normalized to the default "Dockerfile". 
if svccfg.Build.Dockerfile != "Dockerfile" { @@ -155,14 +155,14 @@ func FixupServices(ctx context.Context, provider client.Provider, project *compo } if len(removedArgs) > 0 { - term.Warnf("service %q: skipping unset build argument %q", svccfg.Name, removedArgs) + slog.WarnContext(ctx, fmt.Sprintf("service %q: skipping unset build argument %q", svccfg.Name, removedArgs)) } } // Fixup secret references; secrets are supposed to be files, not env, but it's kept for backward compatibility for i, secret := range svccfg.Secrets { if i == 0 { // only warn once - term.Warnf("service %q: secrets will be exposed as environment variables, not files (use 'environment' instead)", svccfg.Name) + slog.WarnContext(ctx, fmt.Sprintf("service %q: secrets will be exposed as environment variables, not files (use 'environment' instead)", svccfg.Name)) } svccfg.Environment[secret.Source] = nil } @@ -176,7 +176,7 @@ func FixupServices(ctx context.Context, provider client.Provider, project *compo // A bug in Compose-go env file parsing can cause empty keys if key == "" { if !shownOnce { - term.Warnf("service %q: skipping unset environment variable key", svccfg.Name) + slog.WarnContext(ctx, fmt.Sprintf("service %q: skipping unset environment variable key", svccfg.Name)) shownOnce = true } delete(svccfg.Environment, key) // remove the empty key; this is safe @@ -204,17 +204,17 @@ func FixupServices(ctx context.Context, provider client.Provider, project *compo } if len(notAdjusted) > 0 { - term.Warnf("service %q: environment variable(s) %q will use the `defang config` value instead of adjusted service name", svccfg.Name, notAdjusted) + slog.WarnContext(ctx, fmt.Sprintf("service %q: environment variable(s) %q will use the `defang config` value instead of adjusted service name", svccfg.Name, notAdjusted)) } if len(overridden) > 0 { - term.Warnf("service %q: environment variable(s) %q overridden by config", svccfg.Name, overridden) + slog.WarnContext(ctx, fmt.Sprintf("service %q: environment 
variable(s) %q overridden by config", svccfg.Name, overridden)) } _, scaling := svccfg.Extensions["x-defang-autoscaling"] if scaling { if _, ok := provider.(*client.PlaygroundProvider); ok { - term.Warnf("service %q: auto-scaling is not supported in the Playground; consider using BYOC (https://s.defang.io/byoc)", svccfg.Name) + slog.WarnContext(ctx, fmt.Sprintf("service %q: auto-scaling is not supported in the Playground; consider using BYOC (https://s.defang.io/byoc)", svccfg.Name)) } } @@ -252,7 +252,7 @@ func fixupLLM(svccfg *composeTypes.ServiceConfig) { // HACK: we must have at least one host port to get a CNAME for the service // litellm listens on 4000 by default var port uint32 = liteLLMPort - term.Debugf("service %q: adding LLM host port %d", svccfg.Name, port) + slog.Debug("adding LLM host port", "service", svccfg.Name, "port", port) svccfg.Ports = []composeTypes.ServicePortConfig{{Target: port, Mode: Mode_HOST, Protocol: Protocol_TCP}} } } @@ -260,7 +260,7 @@ func fixupLLM(svccfg *composeTypes.ServiceConfig) { func fixupPostgresService(svccfg *composeTypes.ServiceConfig, provider client.Provider, upload UploadMode) error { _, managedPostgres := svccfg.Extensions["x-defang-postgres"] if _, ok := provider.(*client.PlaygroundProvider); ok && managedPostgres && upload != UploadModeEstimate { - term.Warnf("service %q: managed postgres is not supported in the Playground; consider using BYOC (https://s.defang.io/byoc)", svccfg.Name) + slog.Warn(fmt.Sprintf("service %q: managed postgres is not supported in the Playground; consider using BYOC (https://s.defang.io/byoc)", svccfg.Name)) } if len(svccfg.Ports) == 0 { // HACK: we must have at least one host port to get a CNAME for the service @@ -273,7 +273,7 @@ func fixupPostgresService(svccfg *composeTypes.ServiceConfig, provider client.Pr return err } } - term.Debugf("service %q: adding postgres host port %d", svccfg.Name, port) + slog.Debug("adding postgres host port", "service", svccfg.Name, "port", port) 
svccfg.Ports = []composeTypes.ServicePortConfig{{Target: port, Mode: Mode_HOST, Protocol: Protocol_TCP}} } else { fixupIngressPorts(svccfg) @@ -284,7 +284,7 @@ func fixupPostgresService(svccfg *composeTypes.ServiceConfig, provider client.Pr func fixupMongoService(svccfg *composeTypes.ServiceConfig, provider client.Provider, upload UploadMode) error { _, managedMongo := svccfg.Extensions["x-defang-mongodb"] if _, ok := provider.(*client.PlaygroundProvider); ok && managedMongo && upload != UploadModeEstimate { - term.Warnf("service %q: managed mongodb is not supported in the Playground; consider using BYOC (https://s.defang.io/byoc)", svccfg.Name) + slog.Warn(fmt.Sprintf("service %q: managed mongodb is not supported in the Playground; consider using BYOC (https://s.defang.io/byoc)", svccfg.Name)) } if len(svccfg.Ports) == 0 { // HACK: we must have at least one host port to get a CNAME for the service @@ -311,7 +311,7 @@ func fixupMongoService(svccfg *composeTypes.ServiceConfig, provider client.Provi } break // done } - term.Debugf("service %q: adding mongodb host port %d", svccfg.Name, port) + slog.Debug("adding mongodb host port", "service", svccfg.Name, "port", port) svccfg.Ports = []composeTypes.ServicePortConfig{{Target: port, Mode: Mode_HOST, Protocol: Protocol_TCP}} } else { fixupIngressPorts(svccfg) @@ -322,7 +322,7 @@ func fixupMongoService(svccfg *composeTypes.ServiceConfig, provider client.Provi func fixupRedisService(svccfg *composeTypes.ServiceConfig, provider client.Provider, upload UploadMode) error { _, managedRedis := svccfg.Extensions["x-defang-redis"] if _, ok := provider.(*client.PlaygroundProvider); ok && managedRedis && upload != UploadModeEstimate { - term.Warnf("service %q: Managed redis is not supported in the Playground; consider using BYOC (https://s.defang.io/byoc)", svccfg.Name) + slog.Warn(fmt.Sprintf("service %q: Managed redis is not supported in the Playground; consider using BYOC (https://s.defang.io/byoc)", svccfg.Name)) } if 
len(svccfg.Ports) == 0 { // HACK: we must have at least one host port to get a CNAME for the service https://redis.io/docs/latest/operate/oss_and_stack/management/config/ @@ -339,7 +339,7 @@ func fixupRedisService(svccfg *composeTypes.ServiceConfig, provider client.Provi // continue; last one wins } } - term.Debugf("service %q: adding redis host port %d", svccfg.Name, port) + slog.Debug("adding redis host port", "service", svccfg.Name, "port", port) svccfg.Ports = []composeTypes.ServicePortConfig{{Target: port, Mode: Mode_HOST, Protocol: Protocol_TCP}} } else { fixupIngressPorts(svccfg) @@ -350,7 +350,7 @@ func fixupRedisService(svccfg *composeTypes.ServiceConfig, provider client.Provi func fixupIngressPorts(svccfg *composeTypes.ServiceConfig) { for i, port := range svccfg.Ports { if port.Mode == Mode_INGRESS || port.Mode == "" { - term.Debugf("service %q: changing port %d to host mode", svccfg.Name, port.Target) + slog.Debug("changing port to host mode", "service", svccfg.Name, "port", port.Target) svccfg.Ports[i].Mode = Mode_HOST } } @@ -448,7 +448,7 @@ func configureAccessGateway(svccfg *composeTypes.ServiceConfig, project *compose if openAIKey == "" { openAIKey = *key } else if *key != openAIKey { - term.Errorf("multiple different OPENAI_API_KEY values found in services depending on %q", svccfg.Name) + slog.Error(fmt.Sprintf("multiple different OPENAI_API_KEY values found in services depending on %q", svccfg.Name)) break } } @@ -542,16 +542,16 @@ func GetImageRepo(image string) string { func fixupPort(port composeTypes.ServicePortConfig) composeTypes.ServicePortConfig { switch port.Mode { case "": - term.Warnf("port %d: no 'mode' was specified; defaulting to 'ingress' (add 'mode: ingress' to silence)", port.Target) + slog.Warn(fmt.Sprintf("port %d: no 'mode' was specified; defaulting to 'ingress' (add 'mode: ingress' to silence)", port.Target)) fallthrough case Mode_INGRESS: // This code is unnecessarily complex because compose-go silently converts short 
`ports:` syntax to ingress+tcp if port.Protocol == Protocol_UDP { - term.Warnf("port %d: UDP ports default to 'host' mode (add 'mode: host' to silence)", port.Target) + slog.Warn(fmt.Sprintf("port %d: UDP ports default to 'host' mode (add 'mode: host' to silence)", port.Target)) port.Mode = Mode_HOST } else { if port.Published != "" { - term.Debugf("port %d: ignoring 'published: %s' in 'ingress' mode", port.Target, port.Published) + slog.Debug("ignoring 'published' in 'ingress' mode", "port", port.Target, "published", port.Published) } if port.AppProtocol == "" { // TCP ingress is not supported; assuming HTTP (add 'app_protocol: http' to silence)" diff --git a/src/pkg/cli/compose/loader.go b/src/pkg/cli/compose/loader.go index 4c0aba535..342cd263f 100644 --- a/src/pkg/cli/compose/loader.go +++ b/src/pkg/cli/compose/loader.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "os" "path/filepath" "regexp" @@ -120,7 +121,7 @@ func (l *Loader) loadProject(ctx context.Context, suppressWarn bool) (*Project, if term.DoDebug() { b, _ := yaml.Marshal(project) - term.Println(string(b)) + fmt.Println(string(b)) } l.cached = project @@ -177,16 +178,16 @@ func (l *Loader) newProjectOptions(suppressWarn bool) (*cli.ProjectOptions, erro if hasSubstitution(templ, key) { // We don't (yet) support substitution patterns during deployment if inEnv && !suppressWarn { - term.Warnf("Environment variable %q is ignored; add it to `.env` if needed", key) + slog.Warn(fmt.Sprintf("Environment variable %q is ignored; add it to `.env` if needed", key)) } else { - term.Debugf("Unresolved environment variable %q", key) + slog.Debug("Unresolved environment variable", "key", key) } return "", false } if inEnv && !suppressWarn { - term.Warnf("Environment variable %q is ignored; add it to `.env` or it may be resolved from config during deployment", key) + slog.Warn(fmt.Sprintf("Environment variable %q is ignored; add it to `.env` or it may be resolved from config during deployment", 
key)) } else { - term.Debugf("Environment variable %q was not resolved locally. It may be resolved from config during deployment", key) + slog.Debug("Environment variable was not resolved locally. It may be resolved from config during deployment", "key", key) } // Leave unresolved variables as-is for resolution later by CD return "${" + key + "}", true diff --git a/src/pkg/cli/compose/serviceNameReplacer.go b/src/pkg/cli/compose/serviceNameReplacer.go index 0ef4c4c75..a0d845c16 100644 --- a/src/pkg/cli/compose/serviceNameReplacer.go +++ b/src/pkg/cli/compose/serviceNameReplacer.go @@ -2,12 +2,13 @@ package compose import ( "context" + "fmt" + "log/slog" "regexp" "slices" "strings" "github.com/DefangLabs/defang/src/pkg/cli/client" - "github.com/DefangLabs/defang/src/pkg/term" composeTypes "github.com/compose-spec/compose-go/v2/types" ) @@ -29,7 +30,7 @@ type ServiceNameReplacer struct { func NewServiceNameReplacer(ctx context.Context, dnsResolver client.DNSResolver, project *composeTypes.Project) ServiceNameReplacer { var skipPublicReplacement bool if err := dnsResolver.UpdateShardDomain(ctx); err != nil { - term.Debugf("failed to update shard domain: %v", err) + slog.Debug("failed to update shard domain", "error", err) skipPublicReplacement = true } // Create a regexp to detect private service names in environment variable and build arg values @@ -74,7 +75,7 @@ func (s *ServiceNameReplacer) replaceServiceNameWithDNS(value string) string { serviceEnd := match[3] serviceName := value[serviceStart:serviceEnd] if s.skipPublicReplacement { - term.Warnf("service %q: reference to public DNS cannot be replaced in %q, use `defang login` and try again", serviceName, value) + slog.Warn(fmt.Sprintf("service %q: reference to public DNS cannot be replaced in %q, use `defang login` and try again", serviceName, value)) } else { return value[:serviceStart] + s.dnsResolver.ServicePublicDNS(NormalizeServiceName(serviceName), s.projectName) + value[serviceEnd:] } @@ -88,9 +89,9 @@ 
func (s *ServiceNameReplacer) ReplaceServiceNameWithDNS(serviceName string, key, val := s.replaceServiceNameWithDNS(value) if val != value { - term.Debugf("service %q: service name was adjusted: %s %q assigned value %q", serviceName, fixupTarget, key, val) + slog.Debug("service name was adjusted", "service", serviceName, "fixupTarget", fixupTarget, "key", key, "value", val) } else if s.publicServiceNames != nil && s.publicServiceNames.MatchString(value) { - term.Debugf("service %q: service name in the %s %q was not adjusted; only references to other services with port mode set to 'host' will be fixed-up", serviceName, fixupTarget, key) + slog.Debug("service name was not adjusted; only references to other services with port mode set to 'host' will be fixed-up", "service", serviceName, "fixupTarget", fixupTarget, "key", key) } return val diff --git a/src/pkg/cli/compose/serviceNameReplacer_test.go b/src/pkg/cli/compose/serviceNameReplacer_test.go index 209f836e3..fc7944be5 100644 --- a/src/pkg/cli/compose/serviceNameReplacer_test.go +++ b/src/pkg/cli/compose/serviceNameReplacer_test.go @@ -3,11 +3,13 @@ package compose import ( "bytes" "context" + "log/slog" "os" "testing" "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/dns" + "github.com/DefangLabs/defang/src/pkg/logs" "github.com/DefangLabs/defang/src/pkg/term" composeTypes "github.com/compose-spec/compose-go/v2/types" ) @@ -121,6 +123,7 @@ func TestServiceNameReplacer(t *testing.T) { prevTerm := term.DefaultTerm var out, err bytes.Buffer term.DefaultTerm = term.NewTerm(os.Stdin, &out, &err) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) t.Cleanup(func() { term.DefaultTerm = prevTerm }) diff --git a/src/pkg/cli/compose/validation.go b/src/pkg/cli/compose/validation.go index b8f4e35ce..34eaa41b9 100644 --- a/src/pkg/cli/compose/validation.go +++ b/src/pkg/cli/compose/validation.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "math" "path/filepath" 
"regexp" @@ -16,7 +17,6 @@ import ( "github.com/DefangLabs/defang/src/pkg" "github.com/DefangLabs/defang/src/pkg/clouds/gcp" "github.com/DefangLabs/defang/src/pkg/modes" - "github.com/DefangLabs/defang/src/pkg/term" composeTypes "github.com/compose-spec/compose-go/v2/types" ) @@ -59,16 +59,16 @@ func ValidateProject(project *composeTypes.Project, mode modes.Mode) error { func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.Project, mode modes.Mode) error { if svccfg.ReadOnly { - term.Debugf("service %q: unsupported compose directive: read_only", svccfg.Name) + slog.Debug("service: unsupported compose directive: read_only", "service", svccfg.Name) } if svccfg.Restart == "" { // This was a warning, but we don't really care and want to reduce the noise - term.Debugf("service %q: missing compose directive: restart; assuming 'unless-stopped' (add 'restart' to silence)", svccfg.Name) + slog.Debug("service: missing compose directive: restart; assuming 'unless-stopped' (add 'restart' to silence)", "service", svccfg.Name) } else if svccfg.Restart != "always" && svccfg.Restart != "unless-stopped" { - term.Debugf("service %q: unsupported compose directive: restart; assuming 'unless-stopped' (add 'restart' to silence)", svccfg.Name) + slog.Debug("service: unsupported compose directive: restart; assuming 'unless-stopped' (add 'restart' to silence)", "service", svccfg.Name) } if svccfg.ContainerName != "" { - term.Debugf("service %q: unsupported compose directive: container_name", svccfg.Name) + slog.Debug("service: unsupported compose directive: container_name", "service", svccfg.Name) } if svccfg.Hostname != "" { return fmt.Errorf("service %q: unsupported compose directive: hostname; consider using 'domainname' instead", svccfg.Name) @@ -77,7 +77,7 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P return fmt.Errorf("service %q: unsupported compose directive: dns_search", svccfg.Name) } if len(svccfg.DNSOpts) != 0 { - 
term.Debugf("service %q: unsupported compose directive: dns_opt", svccfg.Name) + slog.Debug("service: unsupported compose directive: dns_opt", "service", svccfg.Name) } if len(svccfg.DNS) != 0 { return fmt.Errorf("service %q: unsupported compose directive: dns", svccfg.Name) @@ -95,37 +95,37 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P return fmt.Errorf("service %q: unsupported compose directive: group_add", svccfg.Name) } if len(svccfg.Ipc) > 0 { - term.Debugf("service %q: unsupported compose directive: ipc", svccfg.Name) + slog.Debug("service: unsupported compose directive: ipc", "service", svccfg.Name) } if len(svccfg.Uts) > 0 { - term.Debugf("service %q: unsupported compose directive: uts", svccfg.Name) + slog.Debug("service: unsupported compose directive: uts", "service", svccfg.Name) } if svccfg.Isolation != "" { - term.Debugf("service %q: unsupported compose directive: isolation", svccfg.Name) + slog.Debug("service: unsupported compose directive: isolation", "service", svccfg.Name) } if svccfg.MacAddress != "" { - term.Debugf("service %q: unsupported compose directive: mac_address", svccfg.Name) + slog.Debug("service: unsupported compose directive: mac_address", "service", svccfg.Name) } if len(svccfg.Labels) > 0 { - term.Debugf("service %q: unsupported compose directive: labels", svccfg.Name) // TODO: add support for labels + slog.Debug("service: unsupported compose directive: labels", "service", svccfg.Name) // TODO: add support for labels } if len(svccfg.Links) > 0 { - term.Debugf("service %q: unsupported compose directive: links", svccfg.Name) + slog.Debug("service: unsupported compose directive: links", "service", svccfg.Name) } if svccfg.Logging != nil { - term.Debugf("service %q: unsupported compose directive: logging", svccfg.Name) + slog.Debug("service: unsupported compose directive: logging", "service", svccfg.Name) } for name := range svccfg.Networks { if _, ok := project.Networks[name]; !ok { // This was a 
warning, but we don't really care and want to reduce the noise - term.Debugf("service %q: network %q is not defined in the top-level networks section", svccfg.Name, name) + slog.Debug("service: network is not defined in the top-level networks section", "service", svccfg.Name, "network", name) } } if len(svccfg.Volumes) > 0 { - term.Warnf("service %q: unsupported compose directive: volumes", svccfg.Name) // TODO: add support for volumes + slog.Warn(fmt.Sprintf("service %q: unsupported compose directive: volumes", svccfg.Name)) // TODO: add support for volumes } if len(svccfg.VolumesFrom) > 0 { - term.Warnf("service %q: unsupported compose directive: volumes_from", svccfg.Name) // TODO: add support for volumes_from + slog.Warn(fmt.Sprintf("service %q: unsupported compose directive: volumes_from", svccfg.Name)) // TODO: add support for volumes_from } if svccfg.Build != nil { _, err := filepath.Abs(svccfg.Build.Context) @@ -144,22 +144,22 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P return fmt.Errorf("service %q: unsupported compose directive: build ssh", svccfg.Name) } if len(svccfg.Build.Labels) != 0 { - term.Debugf("service %q: unsupported compose directive: build labels", svccfg.Name) // TODO: add support for Kaniko --label + slog.Debug("service: unsupported compose directive: build labels", "service", svccfg.Name) // TODO: add support for Kaniko --label } if len(svccfg.Build.CacheFrom) != 0 { - term.Debugf("service %q: unsupported compose directive: build cache_from", svccfg.Name) + slog.Debug("service: unsupported compose directive: build cache_from", "service", svccfg.Name) } if len(svccfg.Build.CacheTo) != 0 { - term.Debugf("service %q: unsupported compose directive: build cache_to", svccfg.Name) + slog.Debug("service: unsupported compose directive: build cache_to", "service", svccfg.Name) } if svccfg.Build.NoCache { - term.Debugf("service %q: unsupported compose directive: build no_cache", svccfg.Name) + 
slog.Debug("service: unsupported compose directive: build no_cache", "service", svccfg.Name) } if len(svccfg.Build.ExtraHosts) != 0 { return fmt.Errorf("service %q: unsupported compose directive: build extra_hosts", svccfg.Name) } if svccfg.Build.Isolation != "" { - term.Debugf("service %q: unsupported compose directive: build isolation", svccfg.Name) + slog.Debug("service: unsupported compose directive: build isolation", "service", svccfg.Name) } if svccfg.Build.Network != "" { return fmt.Errorf("service %q: unsupported compose directive: build network", svccfg.Name) @@ -183,7 +183,7 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P return fmt.Errorf("service %q: unsupported compose directive: build additional_contexts", svccfg.Name) } if svccfg.Build.Ulimits != nil { - term.Warnf("service %q: unsupported compose directive: build ulimits", svccfg.Name) // TODO: add support for build ulimits + slog.Warn(fmt.Sprintf("service %q: unsupported compose directive: build ulimits", svccfg.Name)) // TODO: add support for build ulimits } } for _, secret := range svccfg.Secrets { @@ -193,11 +193,11 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P // secret.Target will always be automatically constructed by compose-go to "/run/secrets/" if s, ok := project.Secrets[secret.Source]; !ok { // This was a warning, but we don't really care and want to reduce the noise - term.Debugf("secret %q is not defined in the top-level secrets section", secret.Source) + slog.Debug("secret is not defined in the top-level secrets section", "secret", secret.Source) } else if s.Name != "" && s.Name != secret.Source { return fmt.Errorf("unsupported secret %q: cannot override name %q", secret.Source, s.Name) // TODO: support custom secret names } else if !s.External { - term.Warnf("unsupported secret %q: not marked external:true", secret.Source) // TODO: support secrets from environment/file + slog.Warn(fmt.Sprintf("unsupported 
secret %q: not marked external:true", secret.Source)) // TODO: support secrets from environment/file } } @@ -212,8 +212,8 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P // show warning if sensitive information is detected if isSecret { - term.Warnf("service %q: environment %q may contain sensitive information; consider using 'defang config set %s' to securely store this value", svccfg.Name, key, key) - term.Debugf("service %q: environment %q may contain detected secrets of type: %v", svccfg.Name, key, ds) + slog.Warn(fmt.Sprintf("service %q: environment %q may contain sensitive information; consider using 'defang config set %s' to securely store this value", svccfg.Name, key, key)) + slog.Debug("service: environment may contain detected secrets", "service", svccfg.Name, "key", key, "types", ds) } } } @@ -226,7 +226,7 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P // Show a warning when we have ingress ports but no explicit healthcheck for _, port := range svccfg.Ports { if port.Mode == Mode_INGRESS { - term.Warnf("service %q: ingress port %d without healthcheck; defaults to GET / HTTP/1.1", svccfg.Name, port.Target) + slog.Warn(fmt.Sprintf("service %q: ingress port %d without healthcheck; defaults to GET / HTTP/1.1", svccfg.Name, port.Target)) break } } @@ -235,14 +235,14 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P if svccfg.HealthCheck.Timeout != nil { timeout = time.Duration(*svccfg.HealthCheck.Timeout).Seconds() if _, frac := math.Modf(timeout); frac != 0 { - term.Warnf("service %q: healthcheck timeout must be a multiple of 1s", svccfg.Name) + slog.Warn(fmt.Sprintf("service %q: healthcheck timeout must be a multiple of 1s", svccfg.Name)) } } interval := 30.0 // default per compose spec if svccfg.HealthCheck.Interval != nil { interval = time.Duration(*svccfg.HealthCheck.Interval).Seconds() if _, frac := math.Modf(interval); frac != 0 { - 
term.Warnf("service %q: healthcheck interval must be a multiple of 1s", svccfg.Name) + slog.Warn(fmt.Sprintf("service %q: healthcheck interval must be a multiple of 1s", svccfg.Name)) } } // Technically this should test for <= but both interval and timeout have 30s as the default value @@ -250,10 +250,10 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P return fmt.Errorf("service %q: healthcheck timeout %fs must be positive and smaller than the interval %fs", svccfg.Name, timeout, interval) } if svccfg.HealthCheck.StartPeriod != nil { - term.Debugf("service %q: unsupported compose directive: healthcheck start_period", svccfg.Name) + slog.Debug("service: unsupported compose directive: healthcheck start_period", "service", svccfg.Name) } if svccfg.HealthCheck.StartInterval != nil { - term.Debugf("service %q: unsupported compose directive: healthcheck start_interval", svccfg.Name) + slog.Debug("service: unsupported compose directive: healthcheck start_interval", "service", svccfg.Name) } } var replicas int @@ -275,29 +275,29 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P return fmt.Errorf("service %q: unsupported compose directive: deploy endpoint_mode", svccfg.Name) } if svccfg.Deploy.Resources.Limits != nil && svccfg.Deploy.Resources.Reservations == nil { - term.Debugf("service %q: no reservations specified; using limits as reservations", svccfg.Name) + slog.Debug("service: no reservations specified; using limits as reservations", "service", svccfg.Name) } reservations = getResourceReservations(svccfg.Deploy.Resources) if reservations != nil && reservations.NanoCPUs < 0 { // "0" just means "as small as possible" return fmt.Errorf("service %q: invalid value for cpus: %v", svccfg.Name, reservations.NanoCPUs) } if len(svccfg.Deploy.Labels) > 0 { - term.Debugf("service %q: unsupported compose directive: deploy labels", svccfg.Name) + slog.Debug("service: unsupported compose directive: deploy labels", 
"service", svccfg.Name) } if len(svccfg.Deploy.Placement.Constraints) != 0 || len(svccfg.Deploy.Placement.Preferences) != 0 || svccfg.Deploy.Placement.MaxReplicas != 0 { - term.Debugf("service %q: unsupported compose directive: deploy placement", svccfg.Name) + slog.Debug("service: unsupported compose directive: deploy placement", "service", svccfg.Name) } if svccfg.Deploy.Replicas != nil { replicas = *svccfg.Deploy.Replicas } } if mode == modes.ModeHighAvailability && replicas < 2 && svccfg.Extensions["x-defang-autoscaling"] == nil { - term.Warnf("service %q: high-availability mode requires at least 2 replicas or x-defang-autoscaling", svccfg.Name) + slog.Warn(fmt.Sprintf("service %q: high-availability mode requires at least 2 replicas or x-defang-autoscaling", svccfg.Name)) } if reservations == nil || reservations.MemoryBytes == 0 { // Don't show this warning for managed pseudo-services like CDN if svccfg.Extensions["x-defang-static-files"] == nil { - term.Warnf("service %q: missing memory reservation; using provider-specific defaults. Specify deploy.resources.reservations.memory to avoid out-of-memory errors", svccfg.Name) + slog.Warn(fmt.Sprintf("service %q: missing memory reservation; using provider-specific defaults. 
Specify deploy.resources.reservations.memory to avoid out-of-memory errors", svccfg.Name)) } } @@ -321,7 +321,7 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P if managedRedis { // Ensure the repo is a valid Redis repo if !IsRedisRepo(repo) { - term.Warnf("service %q: managed Redis service should use a redis or valkey image", svccfg.Name) + slog.Warn(fmt.Sprintf("service %q: managed Redis service should use a redis or valkey image", svccfg.Name)) } if _, err = validateManagedStore(redisExtension); err != nil { return fmt.Errorf("service %q: %w", svccfg.Name, err) @@ -332,7 +332,7 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P if managedPostgres { // Ensure the repo is a valid Postgres repo if !IsPostgresRepo(repo) { - term.Warnf("service %q: managed Postgres service should use a postgres image", svccfg.Name) + slog.Warn(fmt.Sprintf("service %q: managed Postgres service should use a postgres image", svccfg.Name)) } if _, err = validateManagedStore(postgresExtension); err != nil { return fmt.Errorf("service %q: %w", svccfg.Name, err) @@ -343,7 +343,7 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P if managedMongodb { // Ensure the repo is a valid MongoDB repo if !IsMongoRepo(repo) { - term.Warnf("service %q: managed MongoDB service should use a mongo image", svccfg.Name) + slog.Warn(fmt.Sprintf("service %q: managed MongoDB service should use a mongo image", svccfg.Name)) } if _, err = validateManagedStore(mongodbExtension); err != nil { return fmt.Errorf("service %q: %w", svccfg.Name, err) @@ -351,7 +351,7 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P } if !managedRedis && !managedPostgres && !managedMongodb && isStatefulImage(svccfg.Image) { - term.Warnf("service %q: stateful service will lose data on restart; use a managed service instead", svccfg.Name) + slog.Warn(fmt.Sprintf("service %q: stateful service will lose data 
on restart; use a managed service instead", svccfg.Name)) } for k := range svccfg.Extensions { @@ -365,7 +365,7 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P "x-defang-autoscaling": continue default: - term.Warnf("service %q: unsupported compose extension: %q", svccfg.Name, k) + slog.Warn(fmt.Sprintf("service %q: unsupported compose extension: %q", svccfg.Name, k)) } } @@ -401,19 +401,19 @@ func validatePort(port composeTypes.ServicePortConfig) error { portRange := strings.SplitN(port.Published, "-", 2) start, err := strconv.ParseUint(portRange[0], 10, 16) if err != nil { - term.Warnf("port %d: 'published' range start should be an integer; ignoring 'published: %v'", port.Target, portRange[0]) + slog.Warn(fmt.Sprintf("port %d: 'published' range start should be an integer; ignoring 'published: %v'", port.Target, portRange[0])) } else if len(portRange) == 2 { end, err := strconv.ParseUint(portRange[1], 10, 16) if err != nil { - term.Warnf("port %d: 'published' range end should be an integer; ignoring 'published: %v'", port.Target, portRange[1]) + slog.Warn(fmt.Sprintf("port %d: 'published' range end should be an integer; ignoring 'published: %v'", port.Target, portRange[1])) } else if start > end { - term.Warnf("port %d: 'published' range start should be less than end; ignoring 'published: %v'", port.Target, port.Published) + slog.Warn(fmt.Sprintf("port %d: 'published' range start should be less than end; ignoring 'published: %v'", port.Target, port.Published)) } else if port.Target < uint32(start) || port.Target > uint32(end) { - term.Warnf("port %d: 'published' range should include 'target'; ignoring 'published: %v'", port.Target, port.Published) + slog.Warn(fmt.Sprintf("port %d: 'published' range should include 'target'; ignoring 'published: %v'", port.Target, port.Published)) } } else { if start != uint64(port.Target) { - term.Warnf("port %d: 'published' should be equal to 'target'; ignoring 'published: %v'", port.Target, 
port.Published) + slog.Warn(fmt.Sprintf("port %d: 'published' should be equal to 'target'; ignoring 'published: %v'", port.Target, port.Published)) } } } diff --git a/src/pkg/cli/compose/validation_test.go b/src/pkg/cli/compose/validation_test.go index cd58976f3..f6c3d2f8f 100644 --- a/src/pkg/cli/compose/validation_test.go +++ b/src/pkg/cli/compose/validation_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "errors" + "log/slog" "os" "slices" "strings" @@ -11,6 +12,7 @@ import ( "github.com/DefangLabs/defang/src/pkg" "github.com/DefangLabs/defang/src/pkg/cli/client" + "github.com/DefangLabs/defang/src/pkg/logs" "github.com/DefangLabs/defang/src/pkg/modes" "github.com/DefangLabs/defang/src/pkg/term" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" @@ -37,8 +39,9 @@ func TestValidationAndConvert(t *testing.T) { } testAllComposeFiles(t, func(t *testing.T, name, path string) { - logs := new(bytes.Buffer) - term.DefaultTerm = term.NewTerm(os.Stdin, logs, logs) + logBuf := new(bytes.Buffer) + term.DefaultTerm = term.NewTerm(os.Stdin, logBuf, logBuf) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) options := LoaderOptions{ConfigPaths: []string{path}} loader := Loader{options: options} @@ -53,7 +56,7 @@ func TestValidationAndConvert(t *testing.T) { if err := FixupServices(t.Context(), mockClient, project, UploadModeIgnore); err != nil { t.Logf("Service conversion failed: %v", err) - logs.WriteString("Error: " + err.Error() + "\n") // no coverage! + logBuf.WriteString("Error: " + err.Error() + "\n") // no coverage! 
} listConfigNames, err := listConfigNamesFunc(t.Context()) @@ -62,7 +65,7 @@ func TestValidationAndConvert(t *testing.T) { } if err := ValidateProjectConfig(project, listConfigNames); err != nil { t.Logf("Project config validation failed: %v", err) - logs.WriteString("Error: " + err.Error() + "\n") + logBuf.WriteString("Error: " + err.Error() + "\n") } mode := modes.ModeAffordable @@ -71,16 +74,16 @@ func TestValidationAndConvert(t *testing.T) { } if err := ValidateProject(project, mode); err != nil { t.Logf("Project validation failed: %v", err) - logs.WriteString("Error: " + err.Error() + "\n") // no coverage! + logBuf.WriteString("Error: " + err.Error() + "\n") // no coverage! } // The order of the services is not guaranteed, so we sort the logs before comparing - logLines := strings.SplitAfter(logs.String(), "\n") + logLines := strings.SplitAfter(logBuf.String(), "\n") slices.Sort(logLines) - logs = bytes.NewBufferString(strings.Join(logLines, "")) + logBuf = bytes.NewBufferString(strings.Join(logLines, "")) // Compare the logs with the warnings file - if err := pkg.Compare(logs.Bytes(), path+".warnings"); err != nil { + if err := pkg.Compare(logBuf.Bytes(), path+".warnings"); err != nil { t.Error(err) } }) diff --git a/src/pkg/cli/composeDown.go b/src/pkg/cli/composeDown.go index 4e9f869a4..18e6be75c 100644 --- a/src/pkg/cli/composeDown.go +++ b/src/pkg/cli/composeDown.go @@ -3,6 +3,7 @@ package cli import ( "context" "errors" + "log/slog" "github.com/AlecAivazis/survey/v2" "github.com/DefangLabs/defang/src/pkg/cli/client" @@ -12,7 +13,7 @@ import ( ) func ComposeDown(ctx context.Context, projectName string, fabric client.FabricClient, provider client.Provider) (types.ETag, error) { - term.Debugf("Destroying project %q", projectName) + slog.Debug("Destroying project", "project", projectName) // If no names are provided, destroy the entire project return CdCommand(ctx, projectName, provider, fabric, client.CdCommandDestroy) @@ -33,6 +34,6 @@ func 
InteractiveComposeDown(ctx context.Context, projectName string, fabric clie return "", ErrDoNotComposeDown } - term.Info("Deactivating project " + projectName) + slog.InfoContext(ctx, "Deactivating project "+projectName) return ComposeDown(ctx, projectName, fabric, provider) } diff --git a/src/pkg/cli/composeUp.go b/src/pkg/cli/composeUp.go index 0489f8b5d..5db652875 100644 --- a/src/pkg/cli/composeUp.go +++ b/src/pkg/cli/composeUp.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/cli/compose" @@ -44,7 +45,7 @@ func checkDeploymentMode(prevMode, newMode modes.Mode) (modes.Mode, error) { switch newMode { case modes.ModeUnspecified: if prevMode != modes.ModeUnspecified { - term.Debug("No deployment mode specified; using previous deployment mode:", prevMode) + slog.Debug(fmt.Sprint("No deployment mode specified; using previous deployment mode:", prevMode)) newMode = prevMode } case modes.ModeAffordable: @@ -52,11 +53,11 @@ func checkDeploymentMode(prevMode, newMode modes.Mode) (modes.Mode, error) { case modes.ModeHighAvailability: return newMode, fmt.Errorf("will not downgrade deployment mode from %s to %s; use %s", prevMode, newMode, modes.ModeBalanced) case modes.ModeBalanced: - term.Warnf("Downgrading deployment mode from %s to %s", prevMode, newMode) + slog.Warn(fmt.Sprintf("Downgrading deployment mode from %s to %s", prevMode, newMode)) } case modes.ModeBalanced: if prevMode == modes.ModeHighAvailability { - term.Warnf("Downgrading deployment mode from %s to %s", prevMode, newMode) + slog.Warn(fmt.Sprintf("Downgrading deployment mode from %s to %s", prevMode, newMode)) } case modes.ModeHighAvailability: // from anything to high-availability is allowed @@ -119,7 +120,7 @@ func ComposeUp(ctx context.Context, fabric client.FabricClient, provider client. 
Stack: provider.GetStackNameForDomain(), }) if err != nil { - term.Debug("GetDelegateSubdomainZone failed:", err) + slog.Debug(fmt.Sprint("GetDelegateSubdomainZone failed:", err)) return nil, project, errors.New("failed to get delegate domain") } @@ -208,12 +209,12 @@ func ComposeUp(ctx context.Context, fabric client.FabricClient, provider client. CdId: resp.CdId, }) if err != nil { - term.Debug("Failed to record deployment:", err) - term.Warn("Unable to update deployment history; deployment will proceed anyway.") + slog.Debug(fmt.Sprint("Failed to record deployment:", err)) + slog.WarnContext(ctx, "Unable to update deployment history; deployment will proceed anyway.") } if term.DoDebug() { - term.Println("Project:", project.Name) + fmt.Println("Project:", project.Name) for _, serviceInfo := range resp.Services { PrintObject(serviceInfo.Service.Name, serviceInfo) } diff --git a/src/pkg/cli/configDelete.go b/src/pkg/cli/configDelete.go index 441e884a9..b7d90b871 100644 --- a/src/pkg/cli/configDelete.go +++ b/src/pkg/cli/configDelete.go @@ -2,15 +2,15 @@ package cli import ( "context" + "log/slog" "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/dryrun" - "github.com/DefangLabs/defang/src/pkg/term" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" ) func ConfigDelete(ctx context.Context, projectName string, provider client.Provider, names ...string) error { - term.Debugf("Deleting config %v in project %q", names, projectName) + slog.Debug("Deleting config in project", "names", names, "project", projectName) if dryrun.DoDryRun { return dryrun.ErrDryRun diff --git a/src/pkg/cli/configList.go b/src/pkg/cli/configList.go index 334e53c15..7bcc707fd 100644 --- a/src/pkg/cli/configList.go +++ b/src/pkg/cli/configList.go @@ -2,6 +2,7 @@ package cli import ( "context" + "log/slog" "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/term" @@ -13,7 +14,7 @@ type PrintConfig struct { } func 
ConfigList(ctx context.Context, projectName string, provider client.Provider) error { - term.Debugf("Listing config in project %q", projectName) + slog.Debug("Listing config", "project", projectName) config, err := provider.ListConfig(ctx, &defangv1.ListConfigsRequest{Project: projectName}) if err != nil { @@ -22,8 +23,8 @@ func ConfigList(ctx context.Context, projectName string, provider client.Provide numConfigs := len(config.Names) if numConfigs == 0 { - _, err := term.Warn("No configs found") - return err + slog.WarnContext(ctx, "No configs found") + return nil } configNames := make([]PrintConfig, numConfigs) diff --git a/src/pkg/cli/configList_test.go b/src/pkg/cli/configList_test.go index e22da4515..5bcb918f6 100644 --- a/src/pkg/cli/configList_test.go +++ b/src/pkg/cli/configList_test.go @@ -2,12 +2,14 @@ package cli import ( "context" + "log/slog" "net/http/httptest" "strings" "testing" "connectrpc.com/connect" "github.com/DefangLabs/defang/src/pkg/cli/client" + "github.com/DefangLabs/defang/src/pkg/logs" "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/types" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" @@ -53,6 +55,9 @@ func TestConfigList(t *testing.T) { t.Run("no configs", func(t *testing.T) { stdout, _ := term.SetupTestTerm(t) + prevLogger := slog.Default() + t.Cleanup(func() { slog.SetDefault(prevLogger) }) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) err := ConfigList(ctx, "emptyconfigs", &provider) if err != nil { @@ -69,6 +74,9 @@ func TestConfigList(t *testing.T) { t.Run("some configs", func(t *testing.T) { stdout, _ := term.SetupTestTerm(t) + prevLogger := slog.Default() + t.Cleanup(func() { slog.SetDefault(prevLogger) }) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) err := ConfigList(ctx, "test", &provider) if err != nil { diff --git a/src/pkg/cli/configResolution.go b/src/pkg/cli/configResolution.go index a5ddd5a9e..482e046e6 100644 --- a/src/pkg/cli/configResolution.go 
+++ b/src/pkg/cli/configResolution.go @@ -2,6 +2,7 @@ package cli import ( "context" + "log/slog" "slices" "strings" @@ -112,7 +113,7 @@ func printConfigResolutionSummary(project *types.Project, defangConfig []string, projectEnvVars = slices.Compact(projectEnvVars) - term.Info("Service environment variables resolution summary:") + slog.Info("Service environment variables resolution summary:") return term.Table(projectEnvVars, "Service", "Environment", "Source", "Value") } diff --git a/src/pkg/cli/configResolution_test.go b/src/pkg/cli/configResolution_test.go index 927cfc60b..120636b9f 100644 --- a/src/pkg/cli/configResolution_test.go +++ b/src/pkg/cli/configResolution_test.go @@ -1,6 +1,7 @@ package cli import ( + "log/slog" "os" "path/filepath" "regexp" @@ -8,12 +9,16 @@ import ( "github.com/DefangLabs/defang/src/pkg" "github.com/DefangLabs/defang/src/pkg/cli/compose" + "github.com/DefangLabs/defang/src/pkg/logs" "github.com/DefangLabs/defang/src/pkg/term" ) func TestPrintConfigResolutionSummary(t *testing.T) { testAllConfigResolutionFiles(t, "testdata/config-resolution", func(t *testing.T, name, path string) { stdout, _ := term.SetupTestTerm(t) + prevLogger := slog.Default() + t.Cleanup(func() { slog.SetDefault(prevLogger) }) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) loader := compose.NewLoader(compose.WithPath(path)) proj, err := loader.LoadProject(t.Context()) @@ -53,6 +58,9 @@ func TestPrintConfigResolutionSummary(t *testing.T) { func TestPrintRedactedConfigResolutionSummary(t *testing.T) { testAllConfigResolutionFiles(t, "testdata/redact-config", func(t *testing.T, name, path string) { stdout, _ := term.SetupTestTerm(t) + prevLogger := slog.Default() + t.Cleanup(func() { slog.SetDefault(prevLogger) }) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) loader := compose.NewLoader(compose.WithPath(path)) proj, err := loader.LoadProject(t.Context()) diff --git a/src/pkg/cli/configSet.go b/src/pkg/cli/configSet.go index bfa7b2bb2..06da4c740 
100644 --- a/src/pkg/cli/configSet.go +++ b/src/pkg/cli/configSet.go @@ -3,10 +3,10 @@ package cli import ( "context" "fmt" + "log/slog" "github.com/DefangLabs/defang/src/pkg" "github.com/DefangLabs/defang/src/pkg/dryrun" - "github.com/DefangLabs/defang/src/pkg/term" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" ) @@ -28,7 +28,7 @@ type ConfigManager interface { } func ConfigSet(ctx context.Context, projectName string, provider ConfigManager, name string, value string, options ConfigSetOptions) (bool, error) { - term.Debugf("Setting config %q in project %q", name, projectName) + slog.Debug("Setting config", "name", name, "project", projectName) if !pkg.IsValidSecretName(name) { return false, ErrInvalidConfigName{Name: name} diff --git a/src/pkg/cli/connect.go b/src/pkg/cli/connect.go index e46ebcf77..e1b57fd33 100644 --- a/src/pkg/cli/connect.go +++ b/src/pkg/cli/connect.go @@ -2,19 +2,20 @@ package cli import ( "context" + "fmt" + "log/slog" "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/cli/client/byoc/aws" "github.com/DefangLabs/defang/src/pkg/cli/client/byoc/do" "github.com/DefangLabs/defang/src/pkg/cli/client/byoc/gcp" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/types" ) // Connect builds a client carrying the requested tenant (name or ID). 
func Connect(fabricAddr string, requestedTenant types.TenantNameOrID) *client.GrpcClient { host := client.NormalizeHost(fabricAddr) - term.Debugf("Using tenant %q for cluster %q", requestedTenant, host) + slog.Debug("Using tenant for cluster", "tenant", requestedTenant, "cluster", host) accessToken := client.GetExistingToken(host) return client.NewGrpcClient(host, accessToken, requestedTenant) @@ -25,7 +26,7 @@ func ConnectWithTenant(ctx context.Context, fabricAddr string, requestedTenant t resp, err := grpcClient.WhoAmI(ctx) if err != nil { - term.Debug("Unable to validate tenant with server:", err) + slog.Debug(fmt.Sprint("Unable to validate tenant with server:", err)) return grpcClient, err } @@ -35,7 +36,7 @@ func ConnectWithTenant(ctx context.Context, fabricAddr string, requestedTenant t func NewProvider(ctx context.Context, providerID client.ProviderID, fabricClient client.FabricClient, stack string) client.Provider { var provider client.Provider - term.Debugf("Creating %s provider", providerID) + slog.Debug("Creating provider", "provider", providerID) switch providerID { case client.ProviderAWS: provider = aws.NewByocProvider(ctx, fabricClient.GetTenantName(), stack) diff --git a/src/pkg/cli/deploymentsList.go b/src/pkg/cli/deploymentsList.go index 92f33194e..071091d0e 100644 --- a/src/pkg/cli/deploymentsList.go +++ b/src/pkg/cli/deploymentsList.go @@ -2,6 +2,8 @@ package cli import ( "context" + "fmt" + "log/slog" "sort" "strings" "time" @@ -46,13 +48,12 @@ func DeploymentsList(ctx context.Context, client client.FabricClient, params Lis if params.ListType == defangv1.DeploymentType_DEPLOYMENT_TYPE_ACTIVE { active = " active" } - var err error if params.ProjectName == "" { - _, err = term.Warnf("No%s deployments found; use --workspace to specify a different workspace", active) + slog.WarnContext(ctx, fmt.Sprintf("No%s deployments found; use --workspace to specify a different workspace", active)) } else { - _, err = term.Warnf("No%s deployments found for 
project %q", active, params.ProjectName) + slog.WarnContext(ctx, fmt.Sprintf("No%s deployments found for project %q", active, params.ProjectName)) } - return err + return nil } // map to Deployment struct diff --git a/src/pkg/cli/deploymentsList_test.go b/src/pkg/cli/deploymentsList_test.go index b6b646ad3..87026573f 100644 --- a/src/pkg/cli/deploymentsList_test.go +++ b/src/pkg/cli/deploymentsList_test.go @@ -2,12 +2,14 @@ package cli import ( "context" + "log/slog" "net/http/httptest" "strings" "testing" "time" connect "connectrpc.com/connect" + "github.com/DefangLabs/defang/src/pkg/logs" "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/types" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" @@ -62,6 +64,9 @@ func TestDeploymentsList(t *testing.T) { t.Run("no deployments", func(t *testing.T) { stdout, _ := term.SetupTestTerm(t) + prevLogger := slog.Default() + t.Cleanup(func() { slog.SetDefault(prevLogger) }) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) err := DeploymentsList(ctx, grpcClient, ListDeploymentsParams{ ListType: defangv1.DeploymentType_DEPLOYMENT_TYPE_HISTORY, ProjectName: "empty", @@ -81,6 +86,9 @@ func TestDeploymentsList(t *testing.T) { t.Run("some deployments", func(t *testing.T) { stdout, _ := term.SetupTestTerm(t) + prevLogger := slog.Default() + t.Cleanup(func() { slog.SetDefault(prevLogger) }) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) err := DeploymentsList(ctx, grpcClient, ListDeploymentsParams{ ListType: defangv1.DeploymentType_DEPLOYMENT_TYPE_HISTORY, ProjectName: "test", @@ -133,6 +141,9 @@ func TestActiveDeployments(t *testing.T) { t.Run("no active deployments", func(t *testing.T) { fabricServer.testDeploymentsData = nil stdout, _ := term.SetupTestTerm(t) + prevLogger := slog.Default() + t.Cleanup(func() { slog.SetDefault(prevLogger) }) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) err := DeploymentsList(ctx, grpcClient, ListDeploymentsParams{ ListType: 
defangv1.DeploymentType_DEPLOYMENT_TYPE_ACTIVE, @@ -169,6 +180,9 @@ func TestActiveDeployments(t *testing.T) { fabricServer.testDeploymentsData = activeDeployments stdout, _ := term.SetupTestTerm(t) + prevLogger := slog.Default() + t.Cleanup(func() { slog.SetDefault(prevLogger) }) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) err := DeploymentsList(ctx, grpcClient, ListDeploymentsParams{ ListType: defangv1.DeploymentType_DEPLOYMENT_TYPE_ACTIVE, ProjectName: "", diff --git a/src/pkg/cli/estimate.go b/src/pkg/cli/estimate.go index 4ea2ffd8f..45ad5e469 100644 --- a/src/pkg/cli/estimate.go +++ b/src/pkg/cli/estimate.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "io" + "log/slog" "os" "sort" "strconv" @@ -21,13 +22,13 @@ import ( ) func RunEstimate(ctx context.Context, project *compose.Project, client client.FabricClient, previewProvider client.Provider, estimateProviderID client.ProviderID, region string, mode modes.Mode) (*defangv1.EstimateResponse, error) { - term.Debugf("Running estimate for project %s in region %s with mode %s", project.Name, region, mode) + slog.Debug("Running estimate for project", "project", project.Name, "region", region, "mode", mode) preview, err := GeneratePreview(ctx, project, client, previewProvider, estimateProviderID, mode, region) if err != nil { return nil, err } - term.Info("Preparing estimate") + slog.InfoContext(ctx, "Preparing estimate") estimate, err := client.Estimate(ctx, &defangv1.EstimateRequest{ Provider: estimateProviderID.Value(), @@ -54,7 +55,7 @@ func GeneratePreview(ctx context.Context, project *compose.Project, client clien return "", fmt.Errorf("failed to marshal compose project: %w", err) } - term.Debugf("Fixedup project: %s", string(composeData)) + slog.Debug("Fixedup project: " + string(composeData)) resp, err := client.Preview(ctx, &defangv1.PreviewRequest{ Provider: estimateProviderID.Value(), @@ -67,7 +68,7 @@ func GeneratePreview(ctx context.Context, project *compose.Project, client clien return "", err } 
- term.Info("Generating deployment preview, this may take a few minutes...") + slog.InfoContext(ctx, "Generating deployment preview, this may take a few minutes...") var pulumiPreviewLogLines []string tailOptions := TailOptions{ Deployment: resp.Etag, diff --git a/src/pkg/cli/generate.go b/src/pkg/cli/generate.go index 3147ed6ca..e1cc80d77 100644 --- a/src/pkg/cli/generate.go +++ b/src/pkg/cli/generate.go @@ -2,6 +2,8 @@ package cli import ( "context" + "fmt" + "log/slog" "os" "path/filepath" @@ -22,7 +24,7 @@ type GenerateArgs struct { func GenerateWithAI(ctx context.Context, client client.FabricClient, args GenerateArgs) ([]string, error) { if dryrun.DoDryRun { - term.Warn("Dry run, no project files will be generated") + slog.WarnContext(ctx, "Dry run, no project files will be generated") return nil, dryrun.ErrDryRun } @@ -42,19 +44,19 @@ func GenerateWithAI(ctx context.Context, client client.FabricClient, args Genera term.Printc(term.DebugColor, file.Name+"\n```") term.Printc(term.DebugColor, file.Content) term.Printc(term.DebugColor, "```") - term.Println("") - term.Println("") + fmt.Println("") + fmt.Println("") } } // Write each file to disk - term.Info("Writing files to disk...") + slog.InfoContext(ctx, "Writing files to disk...") if err := os.MkdirAll(args.Folder, 0755); err != nil { return nil, err } for _, file := range response.Files { // Print the files that were generated - term.Println(" -", file.Name) + fmt.Println(" -", file.Name) // TODO: this will overwrite existing files if err = os.WriteFile(filepath.Join(args.Folder, file.Name), []byte(file.Content), 0644); err != nil { return nil, err diff --git a/src/pkg/cli/getServices.go b/src/pkg/cli/getServices.go index f962a8acd..c37e3fa17 100644 --- a/src/pkg/cli/getServices.go +++ b/src/pkg/cli/getServices.go @@ -3,6 +3,7 @@ package cli import ( "context" "fmt" + "log/slog" "net/http" "net/url" "strings" @@ -51,7 +52,7 @@ func PrintLongServices(ctx context.Context, projectName string, provider client. 
} func GetServices(ctx context.Context, projectName string, provider client.Provider) ([]ServiceLineItem, error) { - term.Debugf("Listing services in project %q", projectName) + slog.Debug("Listing services in project", "project", projectName) servicesResponse, err := provider.GetServices(ctx, &defangv1.GetServicesRequest{Project: projectName}) if err != nil { @@ -112,7 +113,7 @@ func GetHealthcheckResults(ctx context.Context, serviceInfos []*defangv1.Service defer wg.Done() result, err := RunHealthcheck(ctx, serviceInfo.Service.Name, "https://"+endpoint, serviceInfo.HealthcheckPath) if err != nil { - term.Debugf("Healthcheck error for service %q at endpoint %q: %s", serviceInfo.Service.Name, endpoint, err.Error()) + slog.Debug("Healthcheck error", "service", serviceInfo.Service.Name, "endpoint", endpoint, "err", err) result = "error" } *results[serviceInfo.Service.Name] = result @@ -135,17 +136,17 @@ func RunHealthcheck(ctx context.Context, name, endpoint, path string) (string, e if err != nil { return "", err } - term.Debugf("[%s] checking health at %s", name, url) + slog.Debug("checking health", "service", name, "url", url) resp, err := http.DefaultClient.Do(req) if err != nil { return "", err } defer resp.Body.Close() if resp.StatusCode >= 200 && resp.StatusCode < 400 { - term.Debugf("[%s] ✔ healthy", name) + slog.Debug("healthy", "service", name) return "healthy", nil } else { - term.Debugf("[%s] ✘ unhealthy (%s)", name, resp.Status) + slog.Debug("unhealthy", "service", name, "status", resp.Status) return "unhealthy (" + resp.Status + ")", nil } } @@ -211,7 +212,7 @@ func PrintServiceStatesAndEndpoints(services []ServiceLineItem) error { } if showCertGenerateHint { - term.Info("Run `defang cert generate` to get a TLS certificate for your service(s)") + slog.Info("Run `defang cert generate` to get a TLS certificate for your service(s)") } return nil diff --git a/src/pkg/cli/getServices_test.go b/src/pkg/cli/getServices_test.go index 15e66f5ad..be907ddf5 100644 
--- a/src/pkg/cli/getServices_test.go +++ b/src/pkg/cli/getServices_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "errors" + "log/slog" "net/http" "net/http/httptest" "os" @@ -13,6 +14,7 @@ import ( "connectrpc.com/connect" "github.com/DefangLabs/defang/src/pkg/cli/client" + "github.com/DefangLabs/defang/src/pkg/logs" "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/types" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" @@ -268,6 +270,7 @@ func TestPrintServiceStatesAndEndpointsAndDomainname(t *testing.T) { var stdout, stderr bytes.Buffer term.DefaultTerm = term.NewTerm(os.Stdin, &stdout, &stderr) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) tests := []struct { name string diff --git a/src/pkg/cli/install_cd.go b/src/pkg/cli/install_cd.go index 6e2db7392..e4194f321 100644 --- a/src/pkg/cli/install_cd.go +++ b/src/pkg/cli/install_cd.go @@ -3,16 +3,16 @@ package cli import ( "context" "errors" + "log/slog" "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/dryrun" - "github.com/DefangLabs/defang/src/pkg/term" ) func InstallCD(ctx context.Context, provider client.Provider, force bool) error { if dryrun.DoDryRun { return errors.New("dry run") } - term.Info("Installing the CD resources into the cluster") + slog.InfoContext(ctx, "Installing the CD resources into the cluster") return provider.SetUpCD(ctx, force) } diff --git a/src/pkg/cli/logout.go b/src/pkg/cli/logout.go index c9fda9a9b..de4bef6ee 100644 --- a/src/pkg/cli/logout.go +++ b/src/pkg/cli/logout.go @@ -2,15 +2,16 @@ package cli import ( "context" + "fmt" + "log/slog" "os" "connectrpc.com/connect" "github.com/DefangLabs/defang/src/pkg/cli/client" - "github.com/DefangLabs/defang/src/pkg/term" ) func Logout(ctx context.Context, fabricClient client.FabricClient, fabricAddr string) error { - term.Debug("Logging out") + slog.Debug("Logging out") err := fabricClient.RevokeToken(ctx) // Ignore unauthenticated errors, 
since we're logging out anyway if err != nil && connect.CodeOf(err) != connect.CodeUnauthenticated { @@ -18,7 +19,7 @@ func Logout(ctx context.Context, fabricClient client.FabricClient, fabricAddr st } if err := client.TokenStore.Delete(client.TokenStorageName(fabricAddr)); err != nil { - term.Warn("Failed to remove stored token:", err) + slog.WarnContext(ctx, fmt.Sprint("Failed to remove stored token:", err)) // Don't return the error - we still consider logout successful } @@ -26,9 +27,9 @@ func Logout(ctx context.Context, fabricClient client.FabricClient, fabricAddr st jwtFile, err := client.GetWebIdentityTokenFile(fabricAddr) if err == nil { if err := os.Remove(jwtFile); err != nil && !os.IsNotExist(err) { - term.Warn("Failed to remove JWT token file:", err) + slog.WarnContext(ctx, fmt.Sprint("Failed to remove JWT token file:", err)) } else if err == nil { - term.Debug("Removed JWT token file:", jwtFile) + slog.Debug(fmt.Sprint("Removed JWT token file:", jwtFile)) } } diff --git a/src/pkg/cli/new.go b/src/pkg/cli/new.go index cdc465536..e3458e8ff 100644 --- a/src/pkg/cli/new.go +++ b/src/pkg/cli/new.go @@ -8,12 +8,12 @@ import ( "errors" "fmt" "io" + "log/slog" "os" "path/filepath" "strings" "github.com/DefangLabs/defang/src/pkg/http" - "github.com/DefangLabs/defang/src/pkg/term" ) var ErrSampleNotFound = errors.New("sample not found") @@ -36,7 +36,7 @@ func FetchSamples(ctx context.Context) ([]Sample, error) { return nil, err } defer resp.Body.Close() - term.Debug(resp.Header) + slog.Debug("Response header", "header", resp.Header) reader := resp.Body if resp.Header.Get("Content-Encoding") == "gzip" { reader, err = gzip.NewReader(resp.Body) @@ -69,14 +69,14 @@ func copyFromSamples(ctx context.Context, dir string, names []string, skipExisti return err } defer resp.Body.Close() - term.Debug(resp.Header) + slog.Debug("Response header", "header", resp.Header) tarball, err := gzip.NewReader(resp.Body) if err != nil { return fmt.Errorf("failed to read tarball: %w", 
err) } defer tarball.Close() tarReader := tar.NewReader(tarball) - term.Info("Copying files to disk...") + slog.InfoContext(ctx, "Copying files to disk...") sampleFound := false @@ -101,7 +101,7 @@ func copyFromSamples(ctx context.Context, dir string, names []string, skipExisti prefix := fmt.Sprintf("%s-%s/samples/%s/", repo, branch, name) if base, ok := strings.CutPrefix(h.Name, prefix); ok && len(base) > 0 { sampleFound = true - term.Println(" -", base) + fmt.Println(" -", base) path := filepath.Join(dir, subdir, base) if h.FileInfo().IsDir() { if err := os.MkdirAll(path, 0755); err != nil { @@ -114,7 +114,7 @@ func copyFromSamples(ctx context.Context, dir string, names []string, skipExisti if !skipExisting || !os.IsExist(err) { return err } - term.Warnf("File already exists, skipping: %q", path) + slog.WarnContext(ctx, fmt.Sprintf("File already exists, skipping: %q", path)) } } } diff --git a/src/pkg/cli/subscribe.go b/src/pkg/cli/subscribe.go index 6b0b95d4a..b3950bd3d 100644 --- a/src/pkg/cli/subscribe.go +++ b/src/pkg/cli/subscribe.go @@ -3,11 +3,12 @@ package cli import ( "context" "errors" + "fmt" "iter" + "log/slog" "connectrpc.com/connect" "github.com/DefangLabs/defang/src/pkg/cli/client" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/types" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" ) @@ -24,7 +25,7 @@ func WaitServiceState( etag types.ETag, services []string, ) (ServiceStates, error) { - term.Debugf("waiting for services %v to reach state %s\n", services, targetState) // TODO: don't print in Go-routine + slog.Debug("waiting for services to reach state", "services", services, "state", targetState) // TODO: don't print in Go-routine if len(services) == 0 { return nil, ErrNothingToMonitor @@ -57,9 +58,9 @@ func WaitServiceState( // a minute and DelayBeforeRetry backs off exponentially up to 1 minute). 
if isTransientError(err) { if connect.CodeOf(err) == connect.CodeResourceExhausted { - term.Warnf("quota exceeded; will retry subscribe stream after backoff: %v", err) + slog.WarnContext(ctx, fmt.Sprintf("quota exceeded; will retry subscribe stream after backoff: %v", err)) } else { - term.Debugf("WaitServiceState: transient error, reconnecting subscribe stream: %v", err) + slog.Debug("WaitServiceState: transient error, reconnecting subscribe stream", "err", err) } if err := provider.DelayBeforeRetry(ctx); err != nil { return serviceStates, err @@ -82,16 +83,16 @@ func WaitServiceState( } } - term.Infof("Waiting for services to finish deploying: %q\n", pendingServices) // TODO: don't print in Go-routine + slog.InfoContext(ctx, fmt.Sprintf("Waiting for services to finish deploying: %q\n", pendingServices)) // TODO: don't print in Go-routine if msg == nil { continue } - term.Debugf("Service update: %s: state=%s and status=%s\n", msg.Name, msg.State, msg.Status) // TODO: don't print in Go-routine + slog.Debug("Service update", "name", msg.Name, "state", msg.State, "status", msg.Status) // TODO: don't print in Go-routine if _, ok := serviceStates[msg.Name]; !ok { - term.Debugf("unexpected service %s update", msg.Name) // TODO: don't print in Go-routine + slog.Debug("unexpected service update", "name", msg.Name) // TODO: don't print in Go-routine continue } if msg.State == defangv1.ServiceState_NOT_SPECIFIED { diff --git a/src/pkg/cli/tail.go b/src/pkg/cli/tail.go index 62f6afa48..72cc57f9d 100644 --- a/src/pkg/cli/tail.go +++ b/src/pkg/cli/tail.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "iter" + "log/slog" "net" "os" "regexp" @@ -146,7 +147,7 @@ func Tail(ctx context.Context, provider client.Provider, projectName string, opt options.LogType = logs.LogTypeAll } - term.Debugf("Tailing %s logs in project %q", options.LogType, projectName) + slog.Debug("Tailing logs in project", "logType", options.LogType, "project", projectName) if options.Deployment != "" { _, err := 
types.ParseEtag(options.Deployment) @@ -161,11 +162,11 @@ func Tail(ctx context.Context, provider client.Provider, projectName string, opt if _, err := provider.GetService(ctx, &defangv1.GetRequest{Project: projectName, Name: service}); err != nil { switch connect.CodeOf(err) { case connect.CodeNotFound: - term.Warnf("Service does not exist (yet): %q", service) + slog.WarnContext(ctx, fmt.Sprintf("Service does not exist (yet): %q", service)) case connect.CodeUnknown: // Ignore unknown (nil) errors default: - term.Warn(err) // TODO: use client.PrettyError(…) + slog.WarnContext(ctx, fmt.Sprintf("%v", err)) // TODO: use client.PrettyError(…) } } } @@ -241,7 +242,7 @@ func streamLogs(ctx context.Context, provider client.Provider, projectName strin Limit: options.Limit, } - term.Debug("Tail request:", tailRequest) + slog.Debug(fmt.Sprint("Tail request:", tailRequest)) logSeq, err := provider.QueryLogs(ctx, tailRequest) if err != nil { @@ -268,7 +269,7 @@ func streamLogs(ctx context.Context, provider client.Provider, projectName strin if oldState, err := term.MakeUnbuf(int(os.Stdin.Fd())); err == nil { defer term.Restore(int(os.Stdin.Fd()), oldState) - term.Info("Showing only build logs and runtime errors. Press V to toggle verbose mode.") + slog.InfoContext(ctx, "Showing only build logs and runtime errors. Press V to toggle verbose mode.") input := term.NewNonBlockingStdin() defer input.Close() // abort the read loop go func() { @@ -290,7 +291,7 @@ func streamLogs(ctx context.Context, provider client.Provider, projectName strin if debug { debugStr = "ON" } - term.Info("Debug mode", debugStr) + slog.InfoContext(ctx, fmt.Sprint("Debug mode", debugStr)) track.Evt("Debug Toggled", P("debug", debug)) case 'v', 'V': verbose := !options.Verbose @@ -302,7 +303,7 @@ func streamLogs(ctx context.Context, provider client.Provider, projectName strin if toggleCount++; toggleCount == 4 && !verbose { modeStr += ". I like the way you work it, no verbosity." 
} - term.Info("Verbose mode", modeStr) + slog.InfoContext(ctx, fmt.Sprint("Verbose mode", modeStr)) track.Evt("Verbose Toggled", P("verbose", verbose), P("toggleCount", toggleCount)) } } @@ -328,7 +329,7 @@ func makeHeadBookendOptions(options *TailOptions, firstLogTime time.Time) *TailO func printHeadBookend(options *TailOptions, firstLogTime time.Time) { newOptions := makeHeadBookendOptions(options, firstLogTime) if !newOptions.Until.IsZero() { - term.Info("To view older logs, run: `defang logs" + newOptions.String() + "`") + slog.Info("To view older logs, run: `defang logs" + newOptions.String() + "`") } } @@ -346,7 +347,7 @@ func makeTailBookendOptions(options *TailOptions, lastLogTime time.Time) *TailOp func printTailBookend(options *TailOptions, lastLogTime time.Time) { newOptions := makeTailBookendOptions(options, lastLogTime) if !newOptions.Since.IsZero() { - term.Info("To view more recent logs, run: `defang logs" + newOptions.String() + "`") + slog.Info("To view more recent logs, run: `defang logs" + newOptions.String() + "`") } } @@ -376,10 +377,11 @@ func receiveLogs(ctx context.Context, provider client.Provider, projectName stri // Reconnect on transient errors if isTransientError(err) { - term.Debug("Disconnected:", err) + slog.Debug(fmt.Sprint("Disconnected:", err)) var spaces int if !options.Raw { - spaces, _ = term.Warnf("Reconnecting...\r") // overwritten below + slog.WarnContext(ctx, "Reconnecting...\r") + spaces = len(" ! 
Reconnecting...\r") // warnPrefix + message, used to clear the line } if err := provider.DelayBeforeRetry(ctx); err != nil { return err @@ -388,7 +390,7 @@ func receiveLogs(ctx context.Context, provider client.Provider, projectName stri stop() // stop the old iterator newLogSeq, err := provider.QueryLogs(ctx, tailRequest) if err != nil { - term.Debug("Reconnect failed:", err) + slog.Debug(fmt.Sprint("Reconnect failed:", err)) return err } next, stop = iter.Pull2(newLogSeq) @@ -443,7 +445,7 @@ func handleLogEntryMsgs(msg *defangv1.TailResponse, doSpinner bool, skipDuplicat err := handler(e, options, term.DefaultTerm) if err != nil { - term.Debug("Ending tail loop", err) + slog.Debug(fmt.Sprint("Ending tail loop", err)) return err } @@ -469,7 +471,7 @@ func logEntryPrintHandler(e *defangv1.LogEntry, options *TailOptions, t *term.Te if options.Raw { if e.Stderr { - term.Error(e.Message) + slog.Error(e.Message) } else { term.Println(e.Message) } diff --git a/src/pkg/cli/tailAndMonitor.go b/src/pkg/cli/tailAndMonitor.go index 183136d8f..68a788dd3 100644 --- a/src/pkg/cli/tailAndMonitor.go +++ b/src/pkg/cli/tailAndMonitor.go @@ -3,7 +3,9 @@ package cli import ( "context" "errors" + "fmt" "io" + "log/slog" "sync" "time" @@ -11,7 +13,6 @@ import ( "github.com/DefangLabs/defang/src/pkg" "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/cli/compose" - "github.com/DefangLabs/defang/src/pkg/term" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" ) @@ -56,7 +57,7 @@ func TailAndMonitor(ctx context.Context, project *compose.Project, provider clie // When CD fails, stop WaitServiceState cancelSvcStatus(cdErr) } else { - term.Info("Deployment complete. Waiting for services to be healthy...") + slog.InfoContext(ctx, "Deployment complete. 
Waiting for services to be healthy...") } }() @@ -72,10 +73,10 @@ func TailAndMonitor(ctx context.Context, project *compose.Project, provider clie // blocking call to tail var tailErr error if err := Tail(tailCtx, provider, project.Name, tailOptions); err != nil { - term.Debug("Tail while monitoring stopped with", err, errors.Unwrap(err)) + slog.Debug(fmt.Sprint("Tail while monitoring stopped with", err, errors.Unwrap(err))) if connect.CodeOf(err) == connect.CodePermissionDenied { - term.Warn("Unable to tail logs. Waiting for the deployment to finish.") + slog.WarnContext(ctx, "Unable to tail logs. Waiting for the deployment to finish.") // If tail fails because of missing permission, we wait for the deployment to finish <-tailCtx.Done() // Get the actual error from the context so we won't print "Error: missing tail permission" @@ -87,14 +88,14 @@ func TailAndMonitor(ctx context.Context, project *compose.Project, provider clie break // an end condition was detected; cdErr and/or svcErr might be nil case errors.Is(context.Cause(ctx), context.Canceled): - term.Warn("Deployment is not finished. Service(s) might not be running.") + slog.WarnContext(ctx, "Deployment is not finished. Service(s) might not be running.") case errors.Is(context.Cause(tailCtx), errMonitoringDone): break // the monitoring stopped the tail; cdErr and/or svcErr will have been set case errors.Is(context.Cause(ctx), context.DeadlineExceeded): // Tail was canceled when wait-timeout is reached; show a warning and exit with an error - term.Warn("Wait-timeout exceeded, detaching from logs. Deployment still in progress.") + slog.WarnContext(ctx, "Wait-timeout exceeded, detaching from logs. 
Deployment still in progress.") fallthrough default: diff --git a/src/pkg/cli/tail_test.go b/src/pkg/cli/tail_test.go index f97c3c54f..083be2f8e 100644 --- a/src/pkg/cli/tail_test.go +++ b/src/pkg/cli/tail_test.go @@ -7,6 +7,7 @@ import ( "encoding/json" "errors" "io" + "log/slog" "os" "strings" "testing" @@ -122,8 +123,11 @@ func TestTail(t *testing.T) { testTerm.ForceColor(true) defaultTerm := term.DefaultTerm term.DefaultTerm = testTerm + prevLogger := slog.Default() + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) t.Cleanup(func() { term.DefaultTerm = defaultTerm + slog.SetDefault(prevLogger) }) const projectName = "project1" @@ -185,10 +189,19 @@ func TestTail(t *testing.T) { } for i, g := range got { - e := expectedLogs[i] g = term.StripAnsi(g) - if got := strings.SplitN(g, " ", 2)[1]; got != e { // Remove the date from the log entry - t.Errorf("Tail() = %q, want %q", got, e) + if i == len(got)-1 { + g = strings.TrimSpace(g) + if !strings.HasPrefix(g, "! Reconnecting") { + t.Errorf("Tail() = %q, want something starting with %q", g, "! 
Reconnecting") + } + } else { + e := expectedLogs[i] + g = strings.TrimRight(g, " ") + e = strings.TrimRight(e, " ") + if got := strings.SplitN(g, " ", 2)[1]; got != e { + t.Errorf("Tail() = %q, want %q", got, e) + } } } @@ -228,10 +241,12 @@ func setupTestTerminal() (*bytes.Buffer, *bytes.Buffer, func()) { testTerm.ForceColor(true) defaultTerm := term.DefaultTerm term.DefaultTerm = testTerm + prevLogger := slog.Default() + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) - // Cleanup function to reset the terminal cleanup := func() { term.DefaultTerm = defaultTerm + slog.SetDefault(prevLogger) } return &stdout, &stderr, cleanup diff --git a/src/pkg/cli/teardown_cd.go b/src/pkg/cli/teardown_cd.go index 584da8567..aa318436e 100644 --- a/src/pkg/cli/teardown_cd.go +++ b/src/pkg/cli/teardown_cd.go @@ -5,12 +5,12 @@ import ( "context" "errors" "fmt" + "log/slog" "slices" "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/cli/client/byoc/state" "github.com/DefangLabs/defang/src/pkg/dryrun" - "github.com/DefangLabs/defang/src/pkg/term" ) var ErrExistingStacks = errors.New("there are still deployed stacks") @@ -34,9 +34,9 @@ func TearDownCD(ctx context.Context, provider client.Provider, force bool) error }) if len(stacks) > 0 { - term.Info("Some stacks are currently deployed. Run the following commands to tear them down:") + slog.InfoContext(ctx, "Some stacks are currently deployed. 
Run the following commands to tear them down:") for _, stack := range stacks { - term.Infof(" `defang down --workspace %s --project-name %s --stack %s`\n", stack.Workspace, stack.Project, stack.Stack) + slog.InfoContext(ctx, fmt.Sprintf(" `defang down --workspace %s --project-name %s --stack %s`", stack.Workspace, stack.Project, stack.Stack)) } if !force { return ErrExistingStacks diff --git a/src/pkg/cli/token.go b/src/pkg/cli/token.go index 632a26ec2..ee343af27 100644 --- a/src/pkg/cli/token.go +++ b/src/pkg/cli/token.go @@ -2,6 +2,8 @@ package cli import ( "context" + "fmt" + "log/slog" "time" "github.com/DefangLabs/defang/src/pkg/auth" @@ -19,7 +21,7 @@ func Token(ctx context.Context, client client.FabricClient, tenant types.TenantN } code, err := auth.StartAuthCodeFlow(ctx, false, func(token string) { - term.Debug("Getting access token for scope:", s) + slog.Debug(fmt.Sprint("Getting access token for scope:", s)) }, "token-cli") if err != nil { return err @@ -36,7 +38,7 @@ func Token(ctx context.Context, client client.FabricClient, tenant types.TenantN scopes = []string{string(s)} } - term.Debugf("Generating token for tenant %q with scopes %v", tenant, scopes) + slog.Debug("Generating token for tenant", "tenant", tenant, "scopes", scopes) resp, err := client.Token(ctx, &defangv1.TokenRequest{ Assertion: at, diff --git a/src/pkg/cli/upgrade.go b/src/pkg/cli/upgrade.go index b84f58654..62167f27e 100644 --- a/src/pkg/cli/upgrade.go +++ b/src/pkg/cli/upgrade.go @@ -3,6 +3,7 @@ package cli import ( "context" "errors" + "log/slog" "os" "os/exec" "path/filepath" @@ -18,13 +19,13 @@ func Upgrade(ctx context.Context) error { if err != nil { return err } - term.Debugf(" - Executable: %s\n", ex) + slog.Debug("Executable path", "path", ex) ex, err = filepath.EvalSymlinks(ex) if err != nil { return err } - term.Debugf(" - Evaluated: %s\n", ex) + slog.Debug("Evaluated executable path", "path", ex) if strings.HasPrefix(ex, "/nix/store/") { // Detect whether the user has used 
Flakes or nix-env @@ -93,6 +94,6 @@ func homebrewPrefix(ctx context.Context) (string, error) { } func printInstructions(cmd string) { - term.Info("To upgrade defang, run the following command:") + slog.Info("To upgrade defang, run the following command:") term.Print("\n ", cmd, "\n\n") } diff --git a/src/pkg/cli/waitForCdTaskExit.go b/src/pkg/cli/waitForCdTaskExit.go index 18fc558c9..e25aefacc 100644 --- a/src/pkg/cli/waitForCdTaskExit.go +++ b/src/pkg/cli/waitForCdTaskExit.go @@ -19,7 +19,7 @@ func WaitForCdTaskExit(ctx context.Context, provider client.Provider) error { select { case <-ticker.C: done, err := provider.GetDeploymentStatus(ctx) - // term.Debugf("Polled CD task status: done=%v, err=%v", done, err) + // slog.Debug("Polled CD task status", "done", done, "err", err) if err != nil { // End condition: EOF indicates that the task has completed successfully if errors.Is(err, io.EOF) { diff --git a/src/pkg/cli/whoami.go b/src/pkg/cli/whoami.go index 6e17e0b1c..e6b50df2f 100644 --- a/src/pkg/cli/whoami.go +++ b/src/pkg/cli/whoami.go @@ -2,10 +2,11 @@ package cli import ( "context" + "fmt" + "log/slog" "github.com/DefangLabs/defang/src/pkg/auth" "github.com/DefangLabs/defang/src/pkg/cli/client" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/types" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" @@ -33,7 +34,7 @@ func Whoami(ctx context.Context, fabric client.FabricClient, maybeProvider clien tenantSelection = types.TenantNameOrID(resp.TenantId) } - term.Debug("User ID: " + resp.UserId) + slog.Debug("User ID: " + resp.UserId) showData := ShowAccountData{ Region: resp.Region, SubscriberTier: resp.Tier, @@ -45,7 +46,7 @@ func Whoami(ctx context.Context, fabric client.FabricClient, maybeProvider clien if maybeProvider != nil { // Add provider account information if err := maybeProvider.Authenticate(ctx, false); err != nil { // Do not interactively login for whoami - term.Debug("Unable to authenticate provider:", 
err) + slog.Debug(fmt.Sprint("Unable to authenticate provider:", err)) } account, err := maybeProvider.AccountInfo(ctx) if err == nil { diff --git a/src/pkg/clouds/aws/codebuild/cfn/setup.go b/src/pkg/clouds/aws/codebuild/cfn/setup.go index 6fce2b414..db87d60f1 100644 --- a/src/pkg/clouds/aws/codebuild/cfn/setup.go +++ b/src/pkg/clouds/aws/codebuild/cfn/setup.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "slices" "strconv" "strings" @@ -12,7 +13,6 @@ import ( "github.com/DefangLabs/defang/src/pkg/clouds/aws" awscodebuild "github.com/DefangLabs/defang/src/pkg/clouds/aws/codebuild" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/aws/aws-sdk-go-v2/service/cloudformation" cfnTypes "github.com/aws/aws-sdk-go-v2/service/cloudformation/types" "github.com/aws/smithy-go" @@ -94,7 +94,7 @@ func (a *AwsCfn) updateStackAndWait(ctx context.Context, templateBody string, fo return err // might call createStackAndWait depending on the error } - term.Info("Waiting for CloudFormation stack", a.stackName, "to be updated...") // TODO: verbose only + slog.InfoContext(ctx, fmt.Sprint("Waiting for CloudFormation stack", a.stackName, "to be updated...")) // TODO: verbose only dso, err := cloudformation.NewStackUpdateCompleteWaiter(cfn, update1s).WaitForOutput(ctx, &cloudformation.DescribeStacksInput{ StackName: uso.StackId, }, stackTimeout) @@ -131,7 +131,7 @@ func (a *AwsCfn) createStackAndWait(ctx context.Context, templateBody string, pa } } - term.Info("Waiting for CloudFormation stack", a.stackName, "to be created...") // TODO: verbose only + slog.InfoContext(ctx, fmt.Sprint("Waiting for CloudFormation stack", a.stackName, "to be created...")) // TODO: verbose only dso, err := cloudformation.NewStackCreateCompleteWaiter(cfn, create1s).WaitForOutput(ctx, &cloudformation.DescribeStacksInput{ StackName: ptr.String(a.stackName), }, stackTimeout) @@ -252,7 +252,7 @@ func (a *AwsCfn) TearDown(ctx context.Context) error { StackName: ptr.String(a.stackName), 
EnableTerminationProtection: ptr.Bool(false), }); err != nil { - term.Warnf("Failed to disable termination protection for CloudFormation stack %s: %v\n", a.stackName, err) + slog.WarnContext(ctx, fmt.Sprintf("Failed to disable termination protection for CloudFormation stack %s: %v\n", a.stackName, err)) } _, err = cfn.DeleteStack(ctx, &cloudformation.DeleteStackInput{ StackName: ptr.String(a.stackName), @@ -262,7 +262,7 @@ func (a *AwsCfn) TearDown(ctx context.Context) error { return err } - term.Info("Waiting for CloudFormation stack", a.stackName, "to be deleted...") // TODO: verbose only + slog.InfoContext(ctx, fmt.Sprint("Waiting for CloudFormation stack", a.stackName, "to be deleted...")) // TODO: verbose only return cloudformation.NewStackDeleteCompleteWaiter(cfn, delete1s).Wait(ctx, &cloudformation.DescribeStacksInput{ StackName: ptr.String(a.stackName), }, stackTimeout) diff --git a/src/pkg/clouds/aws/login.go b/src/pkg/clouds/aws/login.go index a607fe9a0..22ef3d2d4 100644 --- a/src/pkg/clouds/aws/login.go +++ b/src/pkg/clouds/aws/login.go @@ -13,6 +13,7 @@ import ( "errors" "fmt" "io" + "log/slog" "net/http" "net/url" "os" @@ -71,10 +72,10 @@ func (p *awsOAuthCredentialsProvider) Retrieve(ctx context.Context) (awssdk.Cred } // Access token is expired — use the refresh token to get new credentials. 
- term.Debug("AWS OAuth access token expired, refreshing...") + slog.Debug("AWS OAuth access token expired, refreshing...") refreshed, err := refreshToken(ctx, p.cached) if err != nil { - term.Debugf("failed to refresh AWS OAuth token: %v", err) + slog.Debug("failed to refresh AWS OAuth token", "error", err) return awssdk.Credentials{}, fmt.Errorf("refreshing AWS OAuth token: %w", err) } @@ -85,9 +86,9 @@ func (p *awsOAuthCredentialsProvider) Retrieve(ctx context.Context) (awssdk.Cred return awssdk.Credentials{}, fmt.Errorf("marshaling refreshed token: %w", err) } if err := p.tokenStore.Save(p.storeKey, string(tokenBytes)); err != nil { - term.Warnf("failed to persist refreshed AWS OAuth token: %v", err) + slog.WarnContext(ctx, fmt.Sprintf("failed to persist refreshed AWS OAuth token: %v", err)) } else { - term.Debugf("persisted refreshed AWS OAuth token for %q", p.storeKey) + slog.Debug("persisted refreshed AWS OAuth token", "storeKey", p.storeKey) } } @@ -126,14 +127,14 @@ func (a *Aws) Authenticate(ctx context.Context, interactive bool) error { } // 1. 
Try default AWS credentials - term.Debugf("checking default AWS credentials for region %s...", a.Region) + slog.Debug("checking default AWS credentials...", "region", a.Region) if _, err := a.testCredentials(ctx, nil); err != nil { if ctx.Err() != nil { // Fast fail if context is done, no need to try other credential sources return ctx.Err() } - term.Debugf("default AWS credentials invalid: %v", err) + slog.Debug("default AWS credentials invalid", "error", err) } else { - term.Debug("found valid default AWS credentials") + slog.Debug("found valid default AWS credentials") return nil } @@ -153,7 +154,7 @@ func (a *Aws) Authenticate(ctx context.Context, interactive bool) error { if !interactive { return errors.New("no valid AWS credentials found") // TODO: Better error message with possible doc link } - term.Info("no valid credentials found, starting interactive login...") + slog.InfoContext(ctx, "no valid credentials found, starting interactive login...") creds, err := a.tryInteractiveLogin(ctx, 3) if err != nil { return err @@ -179,7 +180,7 @@ func (a *Aws) tryInteractiveLogin(ctx context.Context, n int) (awssdk.Credential sum := sha256.Sum256([]byte(cached.LoginSession)) storeKey = fmt.Sprintf("%s%x", tokenStoreKeyPrefix, sum) if err := a.TokenStore.Save(storeKey, string(tokenBytes)); err != nil { - term.Warnf("failed to save AWS OAuth token: %v", err) + slog.WarnContext(ctx, fmt.Sprintf("failed to save AWS OAuth token: %v", err)) } } @@ -187,7 +188,7 @@ func (a *Aws) tryInteractiveLogin(ctx context.Context, n int) (awssdk.Credential creds, err := a.testCredentialsWithProfile(ctx, storeKey, provider) if err != nil { - term.Warnf("Cannot use login credentials: %v, please try again.", err) + slog.WarnContext(ctx, fmt.Sprintf("Cannot use login credentials: %v, please try again.", err)) continue } return creds, nil @@ -196,7 +197,7 @@ func (a *Aws) tryInteractiveLogin(ctx context.Context, n int) (awssdk.Credential } func (a *Aws) findStoredCredentials(ctx 
context.Context) (awssdk.CredentialsProvider, error) { - term.Debug("checking stored AWS OAuth tokens...") + slog.Debug("checking stored AWS OAuth tokens...") tokenNames, err := a.TokenStore.List(tokenStoreKeyPrefix) if err != nil { return nil, fmt.Errorf("failed to list tokens: %w", err) @@ -205,13 +206,13 @@ func (a *Aws) findStoredCredentials(ctx context.Context) (awssdk.CredentialsProv for _, name := range tokenNames { tokenJSON, err := a.TokenStore.Load(name) if err != nil { - term.Debugf("failed to load token %q: %v", name, err) + slog.Debug("failed to load token", "name", name, "error", err) continue } var cached awsTokenCache if err := json.Unmarshal([]byte(tokenJSON), &cached); err != nil { - term.Debugf("failed to unmarshal token %q: %v", name, err) + slog.Debug("failed to unmarshal token", "name", name, "error", err) continue } @@ -221,11 +222,11 @@ func (a *Aws) findStoredCredentials(ctx context.Context) (awssdk.CredentialsProv } if cached.RefreshToken == "" && time.Now().After(cached.AccessToken.ExpiresAt) { - term.Debugf("token %q is expired and has no refresh token, skipping", name) + slog.Debug("token is expired and has no refresh token, skipping", "name", name) continue } - term.Debugf("testing token %q (expires %s)...", name, cached.AccessToken.ExpiresAt.Format(time.RFC3339)) + slog.Debug("testing token...", "name", name, "expires", cached.AccessToken.ExpiresAt.Format(time.RFC3339)) provider := &awsOAuthCredentialsProvider{cached: &cached, tokenStore: a.TokenStore, storeKey: name} // Calling testCredentialsWithProfile triggers Retrieve(), which auto-refreshes @@ -236,7 +237,7 @@ func (a *Aws) findStoredCredentials(ctx context.Context) (awssdk.CredentialsProv if ctx.Err() != nil { return nil, ctx.Err() } - term.Debugf("token %q failed AWS_PROFILE role validation: %v, skipping...", name, err) + slog.Debug("token failed AWS_PROFILE role validation, skipping...", "name", name, "error", err) continue } return creds, nil @@ -256,19 +257,19 @@ func (a 
*Aws) testCredentialsWithProfile(ctx context.Context, name string, creds // If the stack/env specifies an AWS_PROFILE with role, try assume the role roleArn, profile, err := a.GetStackAwsProfileRoleArn(ctx) if err != nil { - term.Warnf("failed to get AWS_PROFILE role ARN: %v", err) + slog.WarnContext(ctx, fmt.Sprintf("failed to get AWS_PROFILE role ARN: %v", err)) } else if profile == "" { - term.Warn("AWS_PROFILE environment variable is not set, skipping AWS_PROFILE role validation") + slog.WarnContext(ctx, "AWS_PROFILE environment variable is not set, skipping AWS_PROFILE role validation") } else if roleArn != "" { same, err := sameRole(*identity.Arn, roleArn) if err != nil { - term.Warnf("failed to compare token identity with AWS_PROFILE role: %v", err) + slog.WarnContext(ctx, fmt.Sprintf("failed to compare token identity with AWS_PROFILE role: %v", err)) } else if same { - term.Debugf("token %q identity %q matches AWS_PROFILE role %q", name, *identity.Arn, roleArn) + slog.Debug("token identity matches AWS_PROFILE role", "name", name, "arn", *identity.Arn, "roleArn", roleArn) return creds, nil } - term.Debugf("checking if token %q identity %q can assume AWS_PROFILE role %q", name, *identity.Arn, roleArn) + slog.Debug("checking if token identity can assume AWS_PROFILE role", "name", name, "arn", *identity.Arn, "roleArn", roleArn) credCfg, err := LoadDefaultConfig(ctx, config.WithRegion(string(a.Region)), config.WithCredentialsProvider(creds)) if err != nil { return nil, err @@ -288,11 +289,11 @@ func (a *Aws) testCredentialsWithProfile(ctx context.Context, name string, creds return nil, fmt.Errorf("login successful, but does not have access to role %q in used by stack aws profile %q; token account %v does not match stack aws profile account %v", roleArn, profile, *identity.Account, parsedArn.AccountID) } // If cannot assume but it's the same account, we assume its a valid token - term.Warnf("login successful for AWS account %v which is same as the account 
specified by stack aws profile %q, assume its valid", *identity.Account, profile) + slog.WarnContext(ctx, fmt.Sprintf("login successful for AWS account %v which is same as the account specified by stack aws profile %q, assume its valid", *identity.Account, profile)) return creds, nil } // If able to assume the profile role, use the assumed role credentials - term.Debugf("token %q is valid and can assume AWS_PROFILE role %q\n", name, roleArn) + slog.Debug("token is valid and can assume AWS_PROFILE role", "name", name, "roleArn", roleArn) return assumeRoleProvider, nil } // If no AWS_PROFILE with role specified, any valid token is considered acceptable @@ -338,7 +339,7 @@ func (a *Aws) InteractiveLogin(ctx context.Context) (*awsTokenCache, error) { port := "8080" // default port if parsing fails parsed, err := url.Parse(redirectURL) if err != nil { - term.Warnf("failed to parse redirect URL %q, assume port 8080: %v", redirectURL, err) + slog.WarnContext(ctx, fmt.Sprintf("failed to parse redirect URL %q, assume port 8080: %v", redirectURL, err)) } else { port = parsed.Port() } @@ -365,7 +366,7 @@ func (a *Aws) CrossDeviceLogin(ctx context.Context) (*awsTokenCache, error) { state := rand.Text()[:16] // random state for CSRF protection authURL := auth.GetAuthorizeUrl("aws", "cross", string(a.Region), state, pkce.Challenge) - term.Println("Please visit the following URL to log in to AWS: (Right click the URL or press ENTER to open browser)") + fmt.Println("Please visit the following URL to log in to AWS: (Right click the URL or press ENTER to open browser)") term.Printf(" %s\n", authURL) term.Print("Enter the authorization code displayed in your browser: ") ctx, inputCh, done := term.OpenBrowserWithInputOnEnter(ctx, authURL) diff --git a/src/pkg/clouds/do/appPlatform/setup.go b/src/pkg/clouds/do/appPlatform/setup.go index 632f18147..d633223ed 100644 --- a/src/pkg/clouds/do/appPlatform/setup.go +++ b/src/pkg/clouds/do/appPlatform/setup.go @@ -4,6 +4,7 @@ import ( 
"context" "errors" "fmt" + "log/slog" "os" "path" "regexp" @@ -13,7 +14,6 @@ import ( "github.com/DefangLabs/defang/src/pkg" "github.com/DefangLabs/defang/src/pkg/clouds/do" "github.com/DefangLabs/defang/src/pkg/dockerhub" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/credentials" "github.com/aws/aws-sdk-go-v2/service/s3" @@ -90,7 +90,7 @@ func (d *DoApp) SetUpBucket(ctx context.Context) error { } func getImageSourceSpec(cdImagePath string) (*godo.ImageSourceSpec, error) { - term.Debugf("Using CD image: %q", cdImagePath) + slog.Debug("Using CD image", "cdImagePath", cdImagePath) image, err := dockerhub.ParseImage(cdImagePath) if err != nil { return nil, err @@ -146,7 +146,7 @@ func (d DoApp) Run(ctx context.Context, env []*godo.AppVariableDefinition, cdIma appList, _, err := client.Apps.List(ctx, &godo.ListOptions{}) if err != nil { - term.Debugf("Error listing apps: %s", err) + slog.Debug("Error listing apps", "error", err) } for _, app := range appList { @@ -157,7 +157,7 @@ func (d DoApp) Run(ctx context.Context, env []*godo.AppVariableDefinition, cdIma //Update current CD app if it exists if currentCd.Spec != nil && currentCd.Spec.Name != "" { - term.Debugf("Updating existing CD app") + slog.Debug("Updating existing CD app") currentCd, _, err = client.Apps.Update(ctx, currentCd.ID, &godo.AppUpdateRequest{ Spec: appJobSpec, UpdateAllSourceVersions: true, // force update of the CD image @@ -167,7 +167,7 @@ func (d DoApp) Run(ctx context.Context, env []*godo.AppVariableDefinition, cdIma return nil, err } } else { - term.Debugf("Creating new CD app") + slog.Debug("Creating new CD app") project, _, err := client.Projects.Create(ctx, &godo.CreateProjectRequest{ Name: CdName, Purpose: "Infrastructure for running Defang commands", diff --git a/src/pkg/clouds/gcp/api.go b/src/pkg/clouds/gcp/api.go index 5d6cdc35d..600e79de9 100644 --- a/src/pkg/clouds/gcp/api.go +++ b/src/pkg/clouds/gcp/api.go @@ -4,10 
+4,10 @@ import ( "context" "errors" "fmt" + "log/slog" "time" "github.com/DefangLabs/defang/src/pkg" - "github.com/DefangLabs/defang/src/pkg/term" "google.golang.org/api/googleapi" "google.golang.org/api/serviceusage/v1" ) @@ -27,7 +27,7 @@ func (gcp Gcp) EnsureAPIsEnabled(ctx context.Context, apis ...string) error { projectName := "projects/" + gcp.ProjectId for i := range maxAttempts { - term.Debugf("Enabling services: %v\n", apis) + slog.Debug("Enabling services", "apis", apis) req := &serviceusage.BatchEnableServicesRequest{ ServiceIds: apis, } @@ -39,9 +39,9 @@ func (gcp Gcp) EnsureAPIsEnabled(ctx context.Context, apis ...string) error { if errors.As(err, &apiErr) && (apiErr.Code == 403 || apiErr.Code == 401) { return fmt.Errorf("permission denied when enabling services: %w", err) } - term.Printf("Error: %+v (%T)\n", err, err) + slog.ErrorContext(ctx, fmt.Sprintf("Error: %+v (%T)", err, err)) if i < maxAttempts-1 { - term.Debugf("Failed to enable services, will retry in %v: %v\n", retryInterval, err) + slog.Debug("Failed to enable services, will retry", "retryInterval", retryInterval, "error", err) if err := pkg.SleepWithContext(ctx, retryInterval); err != nil { return err } @@ -54,11 +54,11 @@ func (gcp Gcp) EnsureAPIsEnabled(ctx context.Context, apis ...string) error { for { op, err := opService.Get(operation.Name).Context(ctx).Do() if err != nil { - term.Warnf("Failed to get operation status: %v\n", err) + slog.WarnContext(ctx, fmt.Sprintf("Failed to get operation status: %v\n", err)) } else if op.Done { // Check if the operation is done if op.Error != nil { if i < maxAttempts-1 { - term.Debugf("Failed to enable services operation, will retry in %v: %v\n", retryInterval, op.Error) + slog.Debug("Failed to enable services operation, will retry", "retryInterval", retryInterval, "error", op.Error) if err := pkg.SleepWithContext(ctx, retryInterval); err != nil { return err } diff --git a/src/pkg/clouds/gcp/iam.go b/src/pkg/clouds/gcp/iam.go index 
c8adc4582..764a0542b 100644 --- a/src/pkg/clouds/gcp/iam.go +++ b/src/pkg/clouds/gcp/iam.go @@ -3,6 +3,7 @@ package gcp import ( "context" "fmt" + "log/slog" "slices" "time" @@ -14,7 +15,6 @@ import ( resourcemanager "cloud.google.com/go/resourcemanager/apiv3" "cloud.google.com/go/storage" "github.com/DefangLabs/defang/src/pkg" - "github.com/DefangLabs/defang/src/pkg/term" gax "github.com/googleapis/gax-go/v2" ) @@ -36,7 +36,7 @@ func (gcp Gcp) EnsureRoleExists(ctx context.Context, roleId, title, description role.GetTitle() == title && role.GetDescription() == description && role.Stage == iamadmpb.Role_GA { - term.Debugf("Role %s already exists", roleId) + slog.Debug("Role already exists", "roleId", roleId) return role.Name, nil } @@ -45,7 +45,7 @@ func (gcp Gcp) EnsureRoleExists(ctx context.Context, roleId, title, description role.Title = title role.Description = description role.Stage = iamadmpb.Role_GA - term.Infof("Updating role %s", roleId) + slog.InfoContext(ctx, "Updating role "+roleId) if _, err := client.UpdateRole(ctx, &iamadmpb.UpdateRoleRequest{Name: roleName, Role: role}); err != nil { return "", fmt.Errorf("failed to update role: %w", err) } @@ -63,12 +63,12 @@ func (gcp Gcp) EnsureRoleExists(ctx context.Context, roleId, title, description Stage: iamadmpb.Role_GA, // TODO: investigate stage }, } - term.Infof("Creating role %s", roleId) + slog.InfoContext(ctx, "Creating role "+roleId) role, err = client.CreateRole(ctx, req) if err != nil { return "", fmt.Errorf("failed to create role: %w", err) } - term.Debugf("Role %s created successfully.", roleId) + slog.Debug("Role created successfully", "roleId", roleId) } // Wait for the role to be created or updated @@ -102,13 +102,13 @@ func (gcp Gcp) EnsureServiceAccountExists(ctx context.Context, serviceAccountId, if err == nil { if account.GetDisplayName() == displayName && account.GetDescription() == description { - term.Debugf("Service account %s already exists", serviceAccountId) + slog.Debug("Service 
account already exists", "serviceAccountId", serviceAccountId) return account.Name, nil } account.DisplayName = displayName account.Description = description - term.Infof("Updating service account %s", serviceAccountId) + slog.InfoContext(ctx, "Updating service account "+serviceAccountId) if _, err := client.UpdateServiceAccount(ctx, &iamadmpb.ServiceAccount{Name: account.Name, DisplayName: displayName, Description: description}); err != nil { return "", fmt.Errorf("failed to update service account: %w", err) } @@ -124,13 +124,13 @@ func (gcp Gcp) EnsureServiceAccountExists(ctx context.Context, serviceAccountId, }, Name: "projects/" + gcp.ProjectId, } - term.Infof("Creating service account %s", serviceAccountId) + slog.InfoContext(ctx, "Creating service account "+serviceAccountId) account, err := client.CreateServiceAccount(ctx, req) if err != nil { return "", fmt.Errorf("failed to create service account: %w", err) } - term.Debugf("Service account %s created successfully.", serviceAccountId) + slog.Debug("Service account created successfully", "serviceAccountId", serviceAccountId) accountName := account.Name for start := time.Now(); time.Since(start) < 5*time.Minute; { account, err = client.GetServiceAccount(ctx, &iamadmpb.GetServiceAccountRequest{Name: accountName}) @@ -188,15 +188,15 @@ func (gcp Gcp) EnsurePrincipalHasBucketRoles(ctx context.Context, bucketName, pr } if !needUpdate { - term.Debugf("Principal %s already has roles %v on bucket %s", principal, roles, bucketName) + slog.Debug("Principal already has roles on bucket", "principal", principal, "roles", roles, "bucket", bucketName) return nil } - term.Infof("Updating IAM policy for principal %s on bucket %s", principal, bucketName) + slog.InfoContext(ctx, fmt.Sprintf("Updating IAM policy for principal %s on bucket %s", principal, bucketName)) for i := range maxAttempts { // Service account might not be visible for a few seconds after creation for policy attachment if err := bucket.IAM().SetPolicy(ctx, 
policy); err != nil { if i < maxAttempts-1 { - term.Infof("Failed to set IAM policy, will retry in %v: %v\n", retryInterval, err) + slog.InfoContext(ctx, fmt.Sprintf("Failed to set IAM policy, will retry in %v: %v\n", retryInterval, err)) if err := pkg.SleepWithContext(ctx, retryInterval); err != nil { return err } @@ -269,14 +269,14 @@ func (gcp Gcp) EnsurePrincipalHasServiceAccountRoles(ctx context.Context, princi return nil } - term.Infof("Updating IAM policy for %s on service account %s", principal, serviceAccount) + slog.InfoContext(ctx, fmt.Sprintf("Updating IAM policy for %s on service account %s", principal, serviceAccount)) for i := range maxAttempts { // Service account might not be visible for a few seconds after creation for policy attachment if _, err := client.SetIamPolicy(ctx, &iamadm.SetIamPolicyRequest{ Resource: resource, Policy: policy, }); err != nil { if i < maxAttempts-1 { - term.Infof("Failed to set IAM policy for service account %s, will retry in %v: %v\n", serviceAccount, retryInterval, err) + slog.InfoContext(ctx, fmt.Sprintf("Failed to set IAM policy for service account %s, will retry in %v: %v\n", serviceAccount, retryInterval, err)) if err := pkg.SleepWithContext(ctx, retryInterval); err != nil { return err } @@ -345,15 +345,15 @@ func ensurePrincipalHasRolesWithResource(ctx context.Context, client resourceWit } if !bindingNeedsUpdate && len(rolesNotFound) == 0 { - term.Debugf("%s already has roles %v on resource %s", principal, roles, resource) + slog.Debug("Principal already has roles on resource", "principal", principal, "roles", roles, "resource", resource) return nil } - term.Infof("Updating IAM policy for resource %s", resource) + slog.InfoContext(ctx, "Updating IAM policy for resource "+resource) for i := range maxAttempts { // Service account might not be visible for a few seconds after creation for policy attachment if _, err := client.SetIamPolicy(ctx, &iampb.SetIamPolicyRequest{Resource: resource, Policy: policy}); err != nil 
{ if i < maxAttempts-1 { - term.Debugf("Failed to set IAM policy for resource %s, will retry in %v: %v\n", resource, retryInterval, err) + slog.Debug("Failed to set IAM policy for resource, will retry", "resource", resource, "retryInterval", retryInterval, "error", err) if err := pkg.SleepWithContext(ctx, retryInterval); err != nil { return err } diff --git a/src/pkg/clouds/gcp/logging.go b/src/pkg/clouds/gcp/logging.go index 992611e4f..720bbe068 100644 --- a/src/pkg/clouds/gcp/logging.go +++ b/src/pkg/clouds/gcp/logging.go @@ -5,10 +5,10 @@ import ( "errors" "fmt" "io" + "log/slog" logging "cloud.google.com/go/logging/apiv2" "cloud.google.com/go/logging/apiv2/loggingpb" - "github.com/DefangLabs/defang/src/pkg/term" "google.golang.org/api/iterator" ) @@ -75,9 +75,9 @@ func (t *gcpLoggingTailer) Next(ctx context.Context) (*loggingpb.LogEntry, error func (t *gcpLoggingTailer) Close() error { // TODO: find out how to properly close the client - term.Debugf("Closing log tailer") + slog.Debug("Closing log tailer") e1 := t.tleClient.CloseSend() - term.Debugf("Closing log tailer client") + slog.Debug("Closing log tailer client") e2 := t.client.Close() return errors.Join(e1, e2) } @@ -116,7 +116,7 @@ func (gcp Gcp) ListLogEntries(ctx context.Context, query string, order Order) (L func (l *gcpLoggingLister) Next() (*loggingpb.LogEntry, error) { entry, err := l.it.Next() if err == iterator.Done { - term.Debugf("Closing log lister client") + slog.Debug("Closing log lister client") if err := l.client.Close(); err != nil { return nil, err } diff --git a/src/pkg/clouds/gcp/login.go b/src/pkg/clouds/gcp/login.go index 98d4f142e..c5ac20589 100644 --- a/src/pkg/clouds/gcp/login.go +++ b/src/pkg/clouds/gcp/login.go @@ -7,6 +7,7 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "os" "path" "slices" @@ -110,14 +111,14 @@ func (gcp *Gcp) Authenticate(ctx context.Context, interactive bool) error { // 1. 
Try the default application credentials or from the "GOOGLE_APPLICATION_CREDENTIALS" env var if set // - if the user has login with glcoud cli with application default credentials // - if the user has set GOOGLE_APPLICATION_CREDENTIALS to a service account key file with required permissions - term.Debugf("checking if application default credentials are available and has permission, GOOGLE_APPLICATION_CREDENTIALS=%q...", os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")) + slog.Debug("checking if application default credentials are available and has permission...", "GOOGLE_APPLICATION_CREDENTIALS", os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")) if err := testTokenProjectPermissions(ctx, gcp.ProjectId, requiredPerms, nil); err != nil { if ctx.Err() != nil { // Fast fail if context is done, no need to try other credential sources return ctx.Err() } - term.Debugf("the application default credentials are missing permissions: %v", err) + slog.Debug("the application default credentials are missing permissions", "error", err) } else { - term.Debug("found valid application default credentials with required permissions") + slog.Debug("found valid application default credentials with required permissions") // No need to pass down ADC token source via options since ADC is automatically used by gcp sdk return nil } @@ -127,13 +128,13 @@ func (gcp *Gcp) Authenticate(ctx context.Context, interactive bool) error { if ctx.Err() != nil { // Fast fail if context is done, no need to try other credential sources return ctx.Err() } - term.Warnf("failed to get GitHub Actions OIDC token source: %v", err) + slog.WarnContext(ctx, fmt.Sprintf("failed to get GitHub Actions OIDC token source: %v", err)) } else if tokenSource != nil { - term.Debug("found GitHub Actions OIDC token source, testing permissions...") + slog.Debug("found GitHub Actions OIDC token source, testing permissions...") if err := testTokenProjectPermissions(ctx, gcp.ProjectId, requiredPerms, tokenSource); err != nil { - 
term.Warnf("GitHub Actions OIDC token is missing required permissions on project %q: %v\nPlease ensure your workload identity provider and github actions permissions are set up correctly: https://docs.defang.com/defang-byoc/gcp/github-actions\n", gcp.ProjectId, err) + slog.WarnContext(ctx, fmt.Sprintf("GitHub Actions OIDC token is missing required permissions on project %q: %v\nPlease ensure your workload identity provider and github actions permissions are set up correctly: https://docs.defang.com/defang-byoc/gcp/github-actions\n", gcp.ProjectId, err)) } else { - term.Debug("GitHub Actions OIDC token has required permissions") + slog.Debug("GitHub Actions OIDC token has required permissions") gcp.Options = append(gcp.Options, option.WithTokenSource(tokenSource)) gcp.TokenSource = tokenSource gcp.Principal = principal @@ -146,9 +147,9 @@ func (gcp *Gcp) Authenticate(ctx context.Context, interactive bool) error { if ctx.Err() != nil { // Fast fail if context is done, no need to try other credential sources return ctx.Err() } - term.Warnf("failed to load stored credentials: %v", err) + slog.WarnContext(ctx, fmt.Sprintf("failed to load stored credentials: %v", err)) } else if tokenSource != nil { - term.Debug("found valid stored credentials with required permissions") + slog.Debug("found valid stored credentials with required permissions") gcp.Options = append(gcp.Options, option.WithTokenSource(tokenSource)) gcp.TokenSource = tokenSource return nil @@ -158,7 +159,7 @@ func (gcp *Gcp) Authenticate(ctx context.Context, interactive bool) error { if !interactive { return errors.New("No valid gcloud credentials found") // TODO: Better error message with possible doc link } - term.Debug("no valid tokens found in token store, starting interactive login flow...") + slog.Debug("no valid tokens found in token store, starting interactive login flow...") return gcp.tryInteractiveLogin(ctx, 3) } @@ -170,11 +171,11 @@ func (gcp *Gcp) tryInteractiveLogin(ctx context.Context, n int) 
error { } if err := testTokenProjectPermissions(ctx, gcp.ProjectId, requiredPerms, tokenSource); err != nil { if errors.As(err, &ErrorMissingPermissions{}) { - term.Warnf("Token from interactive login is missing required permissions on project %q: %v\nPlease ensure your user has the following permissions: %v\n", gcp.ProjectId, err, requiredPerms) + slog.WarnContext(ctx, fmt.Sprintf("Token from interactive login is missing required permissions on project %q: %v\nPlease ensure your user has the following permissions: %v\n", gcp.ProjectId, err, requiredPerms)) } else { - term.Warnf("Failed to validate token from interactive login on project %q: %v\n", gcp.ProjectId, err) + slog.WarnContext(ctx, fmt.Sprintf("Failed to validate token from interactive login on project %q: %v\n", gcp.ProjectId, err)) } - term.Warn("Please try logging in again with an account that has the required permissions.") + slog.WarnContext(ctx, "Please try logging in again with an account that has the required permissions.") continue } gcp.Options = append(gcp.Options, option.WithTokenSource(tokenSource)) @@ -192,7 +193,7 @@ func (gcp *Gcp) tryInteractiveLogin(ctx context.Context, n int) error { return fmt.Errorf("failed to marshal token: %w", err) } if gcp.TokenStore == nil { - term.Warn("No token store configured, skipping persisting token") + slog.WarnContext(ctx, "No token store configured, skipping persisting token") return nil } if err := gcp.TokenStore.Save(tokenName, string(bytes)); err != nil { @@ -221,24 +222,24 @@ func (gcp *Gcp) findStoredCredentials(ctx context.Context) (oauth2.TokenSource, for _, name := range oauthTokenNames { tokenJson, err := gcp.TokenStore.Load(name) if err != nil { - term.Warnf("failed to load previously saved auth token %q: %v", name, err) + slog.WarnContext(ctx, fmt.Sprintf("failed to load previously saved auth token %q: %v", name, err)) continue } var token oauth2.Token if err = json.Unmarshal([]byte(tokenJson), &token); err != nil { - term.Warnf("failed to 
parse previously saved auth token %q: %v", name, err) + slog.WarnContext(ctx, fmt.Sprintf("failed to parse previously saved auth token %q: %v", name, err)) continue } - term.Debugf("Testing token %q from store for required permissions...", name) + slog.Debug("Testing token from store for required permissions...", "name", name) tokenSource := config.TokenSource(ctx, &token) if err := testTokenProjectPermissions(ctx, gcp.ProjectId, requiredPerms, tokenSource); err == nil { - term.Debugf("Token %q is valid and has required permissions\n", name) + slog.Debug("Token is valid and has required permissions", "name", name) currentToken, err := tokenSource.Token() if err != nil { return nil, fmt.Errorf("failed to retrieve current token from token source: %w", err) } if currentToken.AccessToken != token.AccessToken || currentToken.Expiry != token.Expiry || currentToken.RefreshToken != token.RefreshToken { - term.Debugf("Token %q has been updated, persisting updated token...\n", name) + slog.Debug("Token has been updated, persisting updated token...", "name", name) bytes, err := json.Marshal(currentToken) if err != nil { return nil, fmt.Errorf("failed to marshal updated token: %w", err) @@ -254,7 +255,7 @@ func (gcp *Gcp) findStoredCredentials(ctx context.Context) (oauth2.TokenSource, if ctx.Err() != nil { return nil, ctx.Err() } - term.Debugf("Token %q is missing required permissions: %v\n", name, err) + slog.Debug("Token is missing required permissions", "name", name, "error", err) continue } } @@ -265,7 +266,7 @@ func findGithubCredentials(ctx context.Context) (oauth2.TokenSource, string, err // If both ACTIONS_ID_TOKEN_REQUEST_URL and GOOGLE_WORKLOAD_IDENTITY_PROVIDER are set, we're doing "Workload Identity Federation" with GCP using github id token githubTokenReqUrl := os.Getenv("ACTIONS_ID_TOKEN_REQUEST_URL") gcpProvider := os.Getenv("GOOGLE_WORKLOAD_IDENTITY_PROVIDER") - term.Debugf("ACTIONS_ID_TOKEN_REQUEST_URL=%q, GOOGLE_WORKLOAD_IDENTITY_PROVIDER=%q", 
githubTokenReqUrl, gcpProvider) + slog.Debug("GitHub Actions environment variables", "ACTIONS_ID_TOKEN_REQUEST_URL", githubTokenReqUrl, "GOOGLE_WORKLOAD_IDENTITY_PROVIDER", gcpProvider) if githubTokenReqUrl == "" || gcpProvider == "" { return nil, "", nil } @@ -326,7 +327,7 @@ func (gcp *Gcp) InteractiveLogin(ctx context.Context) (oauth2.TokenSource, error publicKeyBase64 := base64.URLEncoding.EncodeToString(pubKey[:]) authorizeURL := auth.GetAuthorizeUrl("gcp", publicKeyBase64) - term.Println("Please visit the following URL to log in to Google Cloud Platform: (Right click the URL or press ENTER to open browser)") + fmt.Println("Please visit the following URL to log in to Google Cloud Platform: (Right click the URL or press ENTER to open browser)") term.Printf(" %s\n", authorizeURL) ctx, done := term.OpenBrowserOnEnter(ctx, authorizeURL) diff --git a/src/pkg/clouds/gcp/storage.go b/src/pkg/clouds/gcp/storage.go index a27ab29af..248175764 100644 --- a/src/pkg/clouds/gcp/storage.go +++ b/src/pkg/clouds/gcp/storage.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "iter" + "log/slog" "strings" "time" @@ -12,7 +13,6 @@ import ( "cloud.google.com/go/iam/credentials/apiv1/credentialspb" "cloud.google.com/go/storage" "github.com/DefangLabs/defang/src/pkg" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/google/uuid" "google.golang.org/api/impersonate" @@ -38,7 +38,7 @@ func (gcp Gcp) EnsureBucketExists(ctx context.Context, prefix string, versioning return "", fmt.Errorf("failed to get bucket with prefix %q: %w", prefix, err) } if existing != "" { - term.Debugf("Bucket %q already exists\n", existing) + slog.Debug("Bucket already exists", "bucket", existing) err := gcp.UpdateBucketVersioning(ctx, existing, versioning) if err != nil { return "", fmt.Errorf("failed to ensure versioning is enabled on existing bucket %q: %w", existing, err) @@ -53,7 +53,7 @@ func (gcp Gcp) EnsureBucketExists(ctx context.Context, prefix string, versioning defer client.Close() newBucketName := 
fmt.Sprintf("%s-%s", prefix, pkg.RandomID()) - term.Infof("Creating defang cd bucket %q", newBucketName) + slog.InfoContext(ctx, fmt.Sprintf("Creating defang cd bucket %q", newBucketName)) bucket := client.Bucket(newBucketName) if err := bucket.Create(ctx, gcp.ProjectId, &storage.BucketAttrs{ diff --git a/src/pkg/debug/debug.go b/src/pkg/debug/debug.go index a1f835b86..b3bc76038 100644 --- a/src/pkg/debug/debug.go +++ b/src/pkg/debug/debug.go @@ -191,7 +191,7 @@ func buildDeploymentDebugPrompt(debugConfig DebugConfig) string { if debugConfig.Project != nil { yaml, err := compose.MarshalYAML(debugConfig.Project) if err != nil { - term.Println("Failed to marshal compose project to YAML for debug:", err) + fmt.Println("Failed to marshal compose project to YAML for debug:", err) } prompt += fmt.Sprintf( "The compose files are at %s. The compose file is as follows:\n\n%s", diff --git a/src/pkg/debug/debug_test.go b/src/pkg/debug/debug_test.go index 9abf216cf..9d6479da8 100644 --- a/src/pkg/debug/debug_test.go +++ b/src/pkg/debug/debug_test.go @@ -3,6 +3,7 @@ package debug import ( "context" "fmt" + "log/slog" "os" "testing" "time" @@ -10,7 +11,6 @@ import ( "github.com/AlecAivazis/survey/v2" "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/cli/compose" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" ) @@ -164,7 +164,7 @@ func TestDebugComposeLoadError(t *testing.T) { _, loadErr := loader.LoadProject(ctx) if loadErr != nil { - term.Error("Cannot load project:", loadErr) + slog.Error(fmt.Sprint("Cannot load project:", loadErr)) project, err := loader.CreateProjectForDebug() assert.NoError(t, err, "CreateProjectForDebug should not return an error") diff --git a/src/pkg/dockerhub/dockerhub.go b/src/pkg/dockerhub/dockerhub.go index 16da979d1..0fcd4ebb6 100644 --- a/src/pkg/dockerhub/dockerhub.go +++ b/src/pkg/dockerhub/dockerhub.go @@ -8,6 +8,7 @@ import ( "errors" 
"fmt" "io" + "log/slog" "net/http" "path" "slices" @@ -15,7 +16,6 @@ import ( "time" "github.com/DefangLabs/defang/src/pkg" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/docker/cli/cli/config" ) @@ -79,7 +79,7 @@ func GenerateNewPublicOnlyPAT(ctx context.Context, label string) (string, string } pat, err = docHubClient.CreatePAT(ctx, label, []string{"repo:public_read"}) if err != nil { - term.Infof("Failed to create Docker Hub PAT, fallback to existing docker credentials: %v", err) + slog.InfoContext(ctx, fmt.Sprintf("Failed to create Docker Hub PAT, fallback to existing docker credentials: %v", err)) // Fallback to use the password as PAT pat = password } diff --git a/src/pkg/github/version.go b/src/pkg/github/version.go index 45b93c633..eaf058e38 100644 --- a/src/pkg/github/version.go +++ b/src/pkg/github/version.go @@ -4,10 +4,10 @@ import ( "context" "encoding/json" "fmt" + "log/slog" "os" "github.com/DefangLabs/defang/src/pkg/http" - "github.com/DefangLabs/defang/src/pkg/term" ) const latestUrl = "https://api.github.com/repos/DefangLabs/defang/releases/latest" @@ -35,12 +35,12 @@ func GetLatestReleaseTag(ctx context.Context) (string, error) { } defer resp.Body.Close() if resp.StatusCode != 200 { - term.Debug(resp.Header) + slog.Debug("unexpected status", "header", resp.Header) // The primary rate limit for unauthenticated requests is 60 requests per hour, per IP. // The API returns a 403 status code when the rate limit is exceeded. 
githubError := githubError{Message: resp.Status} if err := json.NewDecoder(resp.Body).Decode(&githubError); err != nil { - term.Debugf("Failed to decode GitHub response: %v", err) + slog.Debug("Failed to decode GitHub response", "error", err) } return "", fmt.Errorf("error fetching release info from GitHub: %s", githubError.Message) } diff --git a/src/pkg/login/agree_tos.go b/src/pkg/login/agree_tos.go index dc8586244..a452302c7 100644 --- a/src/pkg/login/agree_tos.go +++ b/src/pkg/login/agree_tos.go @@ -3,6 +3,8 @@ package login import ( "context" "errors" + "fmt" + "log/slog" "github.com/AlecAivazis/survey/v2" "github.com/DefangLabs/defang/src/pkg/cli/client" @@ -19,12 +21,12 @@ func InteractiveAgreeToS(ctx context.Context, fabric client.FabricClient) error if client.TermsAccepted() { // The user has already agreed to the terms of service recently if err := nonInteractiveAgreeToS(ctx, fabric); err != nil { - term.Debug("unable to agree to terms:", err) // not fatal + slog.Debug(fmt.Sprint("unable to agree to terms:", err)) // not fatal } return nil } - term.Println("Our latest terms of service can be found at https://s.defang.io/tos") + fmt.Println("Our latest terms of service can be found at https://s.defang.io/tos") var agreeToS bool err := survey.AskOne(&survey.Confirm{ @@ -50,7 +52,7 @@ func NonInteractiveAgreeToS(ctx context.Context, fabric client.FabricClient) err // Persist the terms agreement in the state file so that we don't ask again if err := client.AcceptTerms(); err != nil { - term.Debug("unable to persist terms agreement:", err) // not fatal + slog.Debug(fmt.Sprint("unable to persist terms agreement:", err)) // not fatal } return nonInteractiveAgreeToS(ctx, fabric) @@ -60,6 +62,6 @@ func nonInteractiveAgreeToS(ctx context.Context, fabric client.FabricClient) err if err := fabric.AgreeToS(ctx); err != nil { return err } - term.Info("You have agreed to the Defang terms of service") + slog.InfoContext(ctx, "You have agreed to the Defang terms of 
service") return nil } diff --git a/src/pkg/login/login.go b/src/pkg/login/login.go index 25dc1700d..5a9346073 100644 --- a/src/pkg/login/login.go +++ b/src/pkg/login/login.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "os" "path/filepath" @@ -27,7 +28,7 @@ type AuthService interface { type OpenAuthService struct{} func (OpenAuthService) login(ctx context.Context, fabricAddr string, flow LoginFlow, mcpClient string) (string, error) { - term.Debug("Logging in to", fabricAddr) + slog.Debug(fmt.Sprint("Logging in to", fabricAddr)) code, err := auth.StartAuthCodeFlow(ctx, flow, func(token string) { client.SaveAccessToken(fabricAddr, token) @@ -60,7 +61,7 @@ func interactiveLogin(ctx context.Context, fabricAddr string, flow LoginFlow, mc } if err := client.SaveAccessToken(fabricAddr, token); err != nil { - term.Warn(err) + slog.WarnContext(ctx, fmt.Sprintf("%v", err)) var pathError *os.PathError if errors.As(err, &pathError) { term.Printf("\nTo fix file permissions, run:\n\n sudo chown -R $(whoami) %q\n", pathError.Path) @@ -72,12 +73,12 @@ func interactiveLogin(ctx context.Context, fabricAddr string, flow LoginFlow, mc } func NonInteractiveGitHubLogin(ctx context.Context, fabric client.FabricClient, fabricAddr string) error { - term.Debug("Non-interactive login using GitHub Actions id-token") + slog.Debug("Non-interactive login using GitHub Actions id-token") idToken, err := github.GetIdToken(ctx, "") // default audience (ie. 
https://github.com/ORG) if err != nil { return fmt.Errorf("non-interactive login failed: %w", err) } - term.Debug("Got GitHub Actions id-token") + slog.Debug("Got GitHub Actions id-token") // Create a Fabric token using the GitHub token as an assertion resp, err := fabric.Token(ctx, &defangv1.TokenRequest{ @@ -103,7 +104,7 @@ func NonInteractiveGitHubLogin(ctx context.Context, fabric client.FabricClient, os.Setenv("AWS_WEB_IDENTITY_TOKEN_FILE", jwtPath) os.Setenv("AWS_ROLE_SESSION_NAME", "defang-cli") // TODO: from WhoAmI } else { - term.Debugf("AWS_WEB_IDENTITY_TOKEN_FILE is already set; not writing token to a new file") + slog.Debug("AWS_WEB_IDENTITY_TOKEN_FILE is already set; not writing token to a new file") } return err @@ -111,7 +112,7 @@ func NonInteractiveGitHubLogin(ctx context.Context, fabric client.FabricClient, func writeWebIdentityToken(fabricAddr, token string) (string, error) { jwtPath, _ := client.GetWebIdentityTokenFile(fabricAddr) - term.Debugf("writing web identity token to %s", jwtPath) + slog.Debug("writing web identity token to " + jwtPath) dir, _ := filepath.Split(jwtPath) if err := os.MkdirAll(dir, 0700); err != nil { return "", fmt.Errorf("failed to create web identity token directory: %w", err) @@ -129,8 +130,8 @@ func InteractiveRequireLoginAndToS(ctx context.Context, fabric client.FabricClie if err = fabric.CheckLoginAndToS(ctx); err != nil { // Login interactively now; only do this for authorization-related errors if connect.CodeOf(err) == connect.CodeUnauthenticated { - term.Debug("Server error:", err) - term.Warn("Please log in to continue.") + slog.Debug(fmt.Sprint("Server error:", err)) + slog.WarnContext(ctx, "Please log in to continue.") term.ResetWarnings() // clear any previous warnings so we don't show them again defer func() { track.Cmd(nil, "Login", P("reason", err)) }() @@ -153,7 +154,7 @@ func InteractiveRequireLoginAndToS(ctx context.Context, fabric client.FabricClie // Check if the user has agreed to the terms of service 
and show a prompt if needed if connect.CodeOf(err) == connect.CodeFailedPrecondition { - term.Warn(client.PrettyError(err)) + slog.WarnContext(ctx, fmt.Sprintf("%v", client.PrettyError(err))) defer func() { track.Cmd(nil, "Terms", P("reason", err)) }() if err = InteractiveAgreeToS(ctx, fabric); err != nil { diff --git a/src/pkg/logs/slog.go b/src/pkg/logs/slog.go index a840c47cf..4da8e0c11 100644 --- a/src/pkg/logs/slog.go +++ b/src/pkg/logs/slog.go @@ -4,12 +4,15 @@ import ( "context" "log/slog" "strings" + "sync" "github.com/DefangLabs/defang/src/pkg/term" ) type termHandler struct { - t *term.Term + t *term.Term + attrs string // pre-formatted persistent attrs + mu sync.Mutex } func newTermHandler(t *term.Term) *termHandler { @@ -21,43 +24,46 @@ func NewTermLogger(t *term.Term) *slog.Logger { } func (h *termHandler) Handle(ctx context.Context, r slog.Record) error { + h.mu.Lock() + defer h.mu.Unlock() + msg := r.Message - // Format attrs if any - if r.NumAttrs() > 0 { - var builder strings.Builder - builder.WriteString(msg) - opened := false - r.Attrs(func(a slog.Attr) bool { - if !opened { - builder.WriteString(" {") - opened = true - } else { - builder.WriteString(", ") - } - strVal := a.String() - if len(strVal) > 80 { - runes := []rune(strVal) + + // Collect attrs from WithAttrs and from this record + var sb strings.Builder + if h.attrs != "" { + sb.WriteString(h.attrs) + } + r.Attrs(func(a slog.Attr) bool { + if sb.Len() > 0 { + sb.WriteString(", ") + } + strVal := a.String() + if len(strVal) > 80 { + runes := []rune(strVal) + if len(runes) > 77 { strVal = string(runes[:77]) + "..." 
} - builder.WriteString(strVal) - return true - }) - builder.WriteString("}") - msg = builder.String() + } + sb.WriteString(strVal) + return true + }) + if sb.Len() > 0 { + msg = msg + " {" + sb.String() + "}" } switch r.Level { case slog.LevelDebug: - _, err := h.t.Debug(msg) + _, err := h.t.WriteDebug(msg) return err case slog.LevelInfo: - _, err := h.t.Info(msg) + _, err := h.t.WriteInfo(msg) return err case slog.LevelWarn: - _, err := h.t.Warn(msg) + _, err := h.t.WriteWarn(msg) return err case slog.LevelError: - _, err := h.t.Error(msg) + _, err := h.t.WriteError(msg) return err default: _, err := h.t.Println(msg) @@ -73,11 +79,24 @@ func (h *termHandler) Enabled(ctx context.Context, level slog.Level) bool { } func (h *termHandler) WithAttrs(attrs []slog.Attr) slog.Handler { - // Since we format attributes in Handle(), we can just return self - return h + var sb strings.Builder + sb.WriteString(h.attrs) + for _, a := range attrs { + if sb.Len() > 0 { + sb.WriteString(", ") + } + strVal := a.String() + if len(strVal) > 80 { + runes := []rune(strVal) + if len(runes) > 77 { + strVal = string(runes[:77]) + "..." 
+ } + } + sb.WriteString(strVal) + } + return &termHandler{t: h.t, attrs: sb.String()} } func (h *termHandler) WithGroup(name string) slog.Handler { - // Groups are not supported in this implementation return h } diff --git a/src/pkg/mcp/mcp_server.go b/src/pkg/mcp/mcp_server.go index 1819aaeae..75638dbfc 100644 --- a/src/pkg/mcp/mcp_server.go +++ b/src/pkg/mcp/mcp_server.go @@ -3,6 +3,7 @@ package mcp import ( "context" "fmt" + "log/slog" "github.com/DefangLabs/defang/src/pkg/agent/common" agentTools "github.com/DefangLabs/defang/src/pkg/agent/tools" @@ -10,7 +11,6 @@ import ( "github.com/DefangLabs/defang/src/pkg/elicitations" "github.com/DefangLabs/defang/src/pkg/mcp/resources" "github.com/DefangLabs/defang/src/pkg/mcp/tools" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/track" "github.com/mark3labs/mcp-go/mcp" "github.com/mark3labs/mcp-go/server" @@ -30,13 +30,13 @@ type ToolTracker struct { func (t *ToolTracker) TrackTool(name string, handler server.ToolHandlerFunc) server.ToolHandlerFunc { return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { name := request.Params.Name - term.Debug("MCP Tool Called: " + name + " with params: " + fmt.Sprintf("%+v", request.Params)) + slog.Debug("MCP Tool Called", "name", name, "params", request.Params) track.Evt("MCP Tool Called", track.P("tool", name), track.P("client", t.client), track.P("cluster", t.fabricAddr), track.P("provider", *t.providerId)) resp, err := handler(ctx, request) if err != nil { - term.Error("MCP Tool Failed: "+name, "error", err) + slog.ErrorContext(ctx, fmt.Sprint("MCP Tool Failed: "+name, "error", err)) } else { - term.Debug("MCP Tool Succeeded: " + name) + slog.Debug("MCP Tool Succeeded: " + name) } track.Evt("MCP Tool Done", track.P("tool", name), track.P("client", t.client), track.P("cluster", t.fabricAddr), track.P("provider", *t.providerId), track.P("error", err)) return resp, err diff --git 
a/src/pkg/mcp/resources/resources.go b/src/pkg/mcp/resources/resources.go index 75b5a762d..a2b9e0f55 100644 --- a/src/pkg/mcp/resources/resources.go +++ b/src/pkg/mcp/resources/resources.go @@ -3,11 +3,11 @@ package resources import ( "context" "fmt" + "log/slog" "os" "path/filepath" "github.com/DefangLabs/defang/src/pkg/cli/client" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/mark3labs/mcp-go/mcp" "github.com/mark3labs/mcp-go/server" ) @@ -26,7 +26,7 @@ var samplesExamplesPath = filepath.Join(client.StateDir, "samples_examples.json" // setupDocumentationResource configures and adds the documentation resource to the MCP server func setupDocumentationResource(s *server.MCPServer) { - term.Info("Creating documentation resource") + slog.Info("Creating documentation resource") docResource := mcp.NewResource( "doc:///knowledge_base/knowledge_base.json", "knowledge_base", @@ -38,7 +38,7 @@ func setupDocumentationResource(s *server.MCPServer) { // Read the file file, err := os.ReadFile(knowledgeBasePath) if err != nil { - term.Error("Failed to read resource file", "error", err, "path", "knowledge_base.json") + slog.ErrorContext(ctx, fmt.Sprint("Failed to read resource file", "error", err, "path", "knowledge_base.json")) return nil, fmt.Errorf("failed to read resource file knowledge_base.json: %w", err) } @@ -55,7 +55,7 @@ func setupDocumentationResource(s *server.MCPServer) { // setupSamplesResource configures and adds the samples examples resource to the MCP server func setupSamplesResource(s *server.MCPServer) { - term.Info("Creating samples examples resource") + slog.Info("Creating samples examples resource") samplesResource := mcp.NewResource( "doc:///knowledge_base/samples_examples.json", "defang_dockerfile_and_compose_examples", @@ -68,7 +68,7 @@ func setupSamplesResource(s *server.MCPServer) { // Read the file file, err := os.ReadFile(samplesExamplesPath) if err != nil { - term.Error("Failed to read resource file", "error", err, "path", 
"samples_examples.json") + slog.ErrorContext(ctx, fmt.Sprint("Failed to read resource file", "error", err, "path", "samples_examples.json")) return nil, fmt.Errorf("failed to read resource file samples_examples.json: %w", err) } diff --git a/src/pkg/mcp/setup.go b/src/pkg/mcp/setup.go index 176271240..2bf24f731 100644 --- a/src/pkg/mcp/setup.go +++ b/src/pkg/mcp/setup.go @@ -3,6 +3,7 @@ package mcp import ( "encoding/json" "fmt" + "log/slog" "os" "path/filepath" "runtime" @@ -10,10 +11,8 @@ import ( "strings" "github.com/AlecAivazis/survey/v2" - "github.com/pelletier/go-toml/v2" - - "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/track" + "github.com/pelletier/go-toml/v2" ) // MCPServerConfig represents the configuration for an MCP server @@ -344,7 +343,7 @@ func SetupClient(clientStr string) error { return err } - term.Infof("Updating %q\n", configPath) + slog.Info(fmt.Sprintf("Updating %q\n", configPath)) // Create the directory if it doesn't exist configDir := filepath.Dir(configPath) @@ -357,7 +356,7 @@ func SetupClient(clientStr string) error { return fmt.Errorf("failed to update MCP config file for client %q: %w", client, err) } - term.Infof("Ensure %s is upgraded to the latest version and restarted for MCP settings to take effect.\n", client) + slog.Info(fmt.Sprintf("Ensure %s is upgraded to the latest version and restarted for MCP settings to take effect.\n", client)) return nil } diff --git a/src/pkg/mcp/utils.go b/src/pkg/mcp/utils.go index 2f68c063d..5a65c6ab6 100644 --- a/src/pkg/mcp/utils.go +++ b/src/pkg/mcp/utils.go @@ -3,11 +3,11 @@ package mcp import ( "fmt" "io" + "log/slog" "net/http" "os" "github.com/DefangLabs/defang/src/pkg/cli/client" - "github.com/DefangLabs/defang/src/pkg/term" ) const DocumentationEndpoint = "data" @@ -20,60 +20,60 @@ var KnowledgeBaseDir = client.StateDir var knowledgeBaseFilenames = [...]string{"knowledge_base.json", "samples_examples.json"} func SetupKnowledgeBase() error { - 
term.Debug("Setting up knowledge base") - term.Debugf("Attempting to download knowledge base files: %v", knowledgeBaseFilenames) + slog.Debug("Setting up knowledge base") + slog.Debug("Attempting to download knowledge base files", "files", knowledgeBaseFilenames) // Create knowledge base directory if it doesn't exist - term.Debugf("Creating knowledge base directory: %s", KnowledgeBaseDir) + slog.Debug("Creating knowledge base directory: " + KnowledgeBaseDir) if err := os.MkdirAll(KnowledgeBaseDir, 0700); err != nil { - term.Error("Failed to create knowledge base directory", "error", err) + slog.Error(fmt.Sprint("Failed to create knowledge base directory", "error", err)) return err } for _, filename := range knowledgeBaseFilenames { - term.Debugf("Downloading knowledge base file: %s", filename) + slog.Debug("Downloading knowledge base file: " + filename) err := downloadKnowledgeBase(KnowledgeBaseDir+"/"+filename, "/"+DocumentationEndpoint+"/"+filename) if err != nil { - term.Error("Failed to download knowledge base file", "error", err, "filename", filename) + slog.Error(fmt.Sprint("Failed to download knowledge base file", "error", err, "filename", filename)) return err } } - term.Debug("Successfully downloaded knowledge base files") + slog.Debug("Successfully downloaded knowledge base files") return nil } func downloadKnowledgeBase(filepath string, path string) (err error) { // Create the file out, err := os.Create(filepath) - term.Debugf("Creating file: %s", filepath) + slog.Debug("Creating file: " + filepath) if err != nil { - term.Error("Failed to create file", "error", err, "filepath", filepath) + slog.Error(fmt.Sprint("Failed to create file", "error", err, "filepath", filepath)) return err } defer out.Close() // Get the data resp, err := http.Get(AskDefangBaseURL + path) - term.Debugf("Downloading file: %s", path) + slog.Debug("Downloading file: " + path) if err != nil { - term.Error("Failed to download file", "error", err, "url", path) + 
slog.Error(fmt.Sprint("Failed to download file", "error", err, "url", path)) return err } defer resp.Body.Close() // Check server response - term.Debugf("Checking server response: %s", resp.Status) + slog.Debug("Checking server response: " + resp.Status) if resp.StatusCode != http.StatusOK { - term.Error("Failed to download file", "error", fmt.Errorf("bad status: %s", resp.Status), "url", path) + slog.Error(fmt.Sprint("Failed to download file", "error", fmt.Errorf("bad status: %s", resp.Status), "url", path)) return fmt.Errorf("bad status: %s", resp.Status) } // Writer the body to file - term.Debugf("Copying Using IO Copy: %s", filepath) + slog.Debug("Copying Using IO Copy: " + filepath) _, err = io.Copy(out, resp.Body) if err != nil { - term.Error("Failed to write file", "error", err, "filepath", filepath) + slog.Error(fmt.Sprint("Failed to write file", "error", err, "filepath", filepath)) return err } diff --git a/src/pkg/migrate/heroku.go b/src/pkg/migrate/heroku.go index 5b1f33d56..d820ef137 100644 --- a/src/pkg/migrate/heroku.go +++ b/src/pkg/migrate/heroku.go @@ -4,8 +4,10 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "io" + "log/slog" "net/http" "os" "os/exec" @@ -14,7 +16,6 @@ import ( "github.com/AlecAivazis/survey/v2" ourHttp "github.com/DefangLabs/defang/src/pkg/http" "github.com/DefangLabs/defang/src/pkg/surveyor" - "github.com/DefangLabs/defang/src/pkg/term" ) type HerokuApplicationInfo struct { @@ -29,14 +30,14 @@ type HerokuApplicationInfo struct { func collectHerokuApplicationInfo(ctx context.Context, client HerokuClientInterface, appName string) (HerokuApplicationInfo, error) { var applicationInfo HerokuApplicationInfo - term.Info("Identifying deployed dynos") + slog.InfoContext(ctx, "Identifying deployed dynos") dynos, err := client.ListDynos(ctx, appName) if err != nil { return HerokuApplicationInfo{}, fmt.Errorf("failed to list dynos: %w", err) } applicationInfo.Dynos = dynos - term.Debugf("Dynos for the selected application: 
%+v\n", dynos) + slog.Debug("Dynos for the selected application", "dynos", dynos) dynoSizes := make(map[string]HerokuDynoSize) for _, dyno := range dynos { @@ -48,7 +49,7 @@ func collectHerokuApplicationInfo(ctx context.Context, client HerokuClientInterf } applicationInfo.DynoSizes = dynoSizes - term.Debugf("Dyno sizes for the selected application: %+v\n", dynoSizes) + slog.Debug("Dyno sizes for the selected application", "dynoSizes", dynoSizes) releaseTasks, err := client.GetReleaseTasks(ctx, appName) if err != nil { @@ -56,15 +57,15 @@ func collectHerokuApplicationInfo(ctx context.Context, client HerokuClientInterf } applicationInfo.ReleaseTasks = releaseTasks - term.Debugf("Release tasks for the selected application: %+v\n", releaseTasks) + slog.Debug("Release tasks for the selected application", "releaseTasks", releaseTasks) - term.Info("Identifying configured addons") + slog.InfoContext(ctx, "Identifying configured addons") addons, err := client.ListAddons(ctx, appName) if err != nil { return HerokuApplicationInfo{}, fmt.Errorf("failed to list Heroku addons: %w", err) } applicationInfo.Addons = addons - term.Debugf("Addons for the selected application: %+v\n", addons) + slog.Debug("Addons for the selected application", "addons", addons) for _, addon := range addons { if addon.AddonService.Name == "heroku-postgresql" { @@ -76,7 +77,7 @@ func collectHerokuApplicationInfo(ctx context.Context, client HerokuClientInterf } } - term.Debugf("Postgres info for the selected application: %+v\n", applicationInfo.PGInfo) + slog.Debug("Postgres info for the selected application", "pgInfo", applicationInfo.PGInfo) configVars, err := client.ListConfigVars(ctx, appName) if err != nil { @@ -101,7 +102,7 @@ func selectSourceApplication(surveyor surveyor.Surveyor, appNames []string) (str if selectedApp != "" { break } - term.Warn("No application selected. Please select an application.") + slog.Warn("No application selected. 
Please select an application.") } return selectedApp, nil @@ -319,14 +320,14 @@ func authenticateHerokuCLI() error { return nil } - term.Info("You need to authenticate with the Heroku CLI.") - term.Info("If a browser window does not open, run `heroku login` in a separate shell and try again.") + slog.Info("You need to authenticate with the Heroku CLI.") + slog.Info("If a browser window does not open, run `heroku login` in a separate shell and try again.") cmd = exec.Command("heroku", "login") // cmd needs to receive any keypress on stdin in order to open a browser cmd.Stdin = bytes.NewBuffer([]byte{'\n'}) _, err = cmd.Output() if err != nil { - term.Debugf("Failed to run `heroku login`: %v", err) + slog.Debug("Failed to run `heroku login`", "err", err) return err } @@ -345,22 +346,22 @@ func getHerokuAuthTokenFromCLI() (string, error) { if err != nil { return "", fmt.Errorf("Heroku CLI is not installed: %w", err) } - term.Info("The Heroku CLI is installed, we'll use it to generate a short-lived authorization token") + slog.Info("The Heroku CLI is installed, we'll use it to generate a short-lived authorization token") err = authenticateHerokuCLI() if err != nil { - term.Debugf("Failed to authenticate Heroku CLI: %v", err) + slog.Debug("Failed to authenticate Heroku CLI", "err", err) return "", err } - term.Debug("Successfully authenticated with Heroku") + slog.Debug("Successfully authenticated with Heroku") cmd := exec.Command("heroku", "authorizations:create", "--expires-in=300", "--json") output, err := cmd.Output() if err != nil { - term.Debugf("Failed to run `heroku authorizations:create`: %v", err) + slog.Debug("Failed to run `heroku authorizations:create`", "err", err) return "", err } - term.Debugf("received output from heroku cli: %s", output) + slog.Debug("Received output from heroku CLI authorization command") var result struct { AccessToken struct { @@ -368,25 +369,29 @@ func getHerokuAuthTokenFromCLI() (string, error) { } `json:"access_token"` } err = 
json.Unmarshal(output, &result) - if err != nil || result.AccessToken.Token == "" { - term.Debugf("Failed to parse Heroku CLI output: %v", err) + if err != nil { + slog.Debug("Failed to parse Heroku CLI output", "err", err) return "", err } + if result.AccessToken.Token == "" { + slog.Debug("Heroku CLI output did not include an access token") + return "", errors.New("heroku CLI returned an empty access token") + } - term.Debug("Successfully obtained Heroku token via CLI") + slog.Debug("Successfully obtained Heroku token via CLI") return result.AccessToken.Token, nil } func getHerokuAuthToken() (string, error) { token := os.Getenv("HEROKU_API_KEY") if token != "" { - term.Debug("Using HEROKU_API_KEY environment variable") + slog.Debug("Using HEROKU_API_KEY environment variable") return token, nil } token = os.Getenv("HEROKU_AUTH_TOKEN") if token != "" { - term.Debug("Using HEROKU_AUTH_TOKEN environment variable") + slog.Debug("Using HEROKU_AUTH_TOKEN environment variable") return token, nil } @@ -395,7 +400,7 @@ func getHerokuAuthToken() (string, error) { return token, nil } - term.Debug("Prompting for Heroku auth token") + slog.Debug("Prompting for Heroku auth token") for { err := survey.AskOne(&survey.Password{ diff --git a/src/pkg/migrate/migrate.go b/src/pkg/migrate/migrate.go index b1e8802b6..44bd09c1d 100644 --- a/src/pkg/migrate/migrate.go +++ b/src/pkg/migrate/migrate.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "runtime" "slices" "strings" @@ -12,7 +13,6 @@ import ( "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/cli/compose" "github.com/DefangLabs/defang/src/pkg/surveyor" - "github.com/DefangLabs/defang/src/pkg/term" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" "go.yaml.in/yaml/v4" ) @@ -26,7 +26,7 @@ func InteractiveSetup(ctx context.Context, fabric client.FabricClient, surveyor sourcePlatform = selected } - term.Debugf("Selected source platform: %s", sourcePlatform) + 
slog.Debug("Selected source platform", "sourcePlatform", sourcePlatform) var composeFileContents string var err error @@ -56,7 +56,7 @@ func setupFromHeroku(ctx context.Context, fabric client.FabricClient, surveyor s // Here you can add logic to process the retrieved apps and set up the project accordingly // For now, we just print the apps - term.Debugf("Your Heroku applications: %+v\n", apps) + slog.Debug("Your Heroku applications", "apps", apps) appNames := make([]string, len(apps)) for i, app := range apps { @@ -68,23 +68,23 @@ func setupFromHeroku(ctx context.Context, fabric client.FabricClient, surveyor s return "", fmt.Errorf("failed to select source application: %w", err) } - term.Infof("Collecting information about %q...", sourceApp) + slog.InfoContext(ctx, fmt.Sprintf("Collecting information about %q...", sourceApp)) applicationInfo, err := collectHerokuApplicationInfo(ctx, herokuClient, sourceApp) if err != nil { return "", fmt.Errorf("failed to collect Heroku application info: %w", err) } - term.Debugf("Application info: %+v\n", applicationInfo) + slog.Debug("Application info", "applicationInfo", applicationInfo) sanitizedApplicationInfo, err := sanitizeHerokuApplicationInfo(applicationInfo) if err != nil { return "", fmt.Errorf("failed to sanitize Heroku application info: %w", err) } - term.Debugf("Sanitized application info: %+v\n", sanitizedApplicationInfo) + slog.Debug("Sanitized application info", "sanitizedApplicationInfo", sanitizedApplicationInfo) - term.Info("Generating compose file...") + slog.InfoContext(ctx, "Generating compose file...") composeFileContents, err := generateComposeFile(ctx, fabric, defangv1.SourcePlatform_SOURCE_PLATFORM_HEROKU, sourceApp, sanitizedApplicationInfo) if err != nil { @@ -129,7 +129,7 @@ func generateComposeFile(ctx context.Context, fabric client.FabricClient, platfo } responseStr := string(resp.Compose) - term.Debugf("Received compose response: %+v", responseStr) + slog.Debug("Received compose response", 
"response", responseStr) // assume the response is markdown, // extract the contents of the first code block if there is one @@ -140,7 +140,7 @@ func generateComposeFile(ctx context.Context, fabric client.FabricClient, platfo composeContent = responseStr } else { previousError = err.Error() - term.Debugf("Failed to extract code block: %v. Retrying...", err) + slog.Debug("Failed to extract code block. Retrying...", "err", err) continue } } @@ -156,7 +156,7 @@ func generateComposeFile(ctx context.Context, fabric client.FabricClient, platfo _, err = compose.LoadFromContentWithInterpolation(ctx, []byte(composeContent), projectName) if err != nil { previousError = err.Error() - term.Debugf("Invalid compose file received: %v. Retrying...", err) + slog.Debug("Invalid compose file received. Retrying...", "err", err) continue } diff --git a/src/pkg/migrate/platform.go b/src/pkg/migrate/platform.go index 847d65538..62fbcc6ce 100644 --- a/src/pkg/migrate/platform.go +++ b/src/pkg/migrate/platform.go @@ -2,10 +2,10 @@ package migrate import ( "fmt" + "log/slog" "github.com/AlecAivazis/survey/v2" "github.com/DefangLabs/defang/src/pkg/surveyor" - "github.com/DefangLabs/defang/src/pkg/term" ) type SourcePlatform string @@ -70,6 +70,6 @@ func selectSourcePlatform(surveyor surveyor.Surveyor) (error, SourcePlatform) { return nil, sourcePlatform } - term.Warnf("Invalid source platform selected: %s. Please try again.", selectedOption) + slog.Warn(fmt.Sprintf("Invalid source platform selected: %s. 
Please try again.", selectedOption)) } } diff --git a/src/pkg/session/session.go b/src/pkg/session/session.go index 0a2c34fae..81d313591 100644 --- a/src/pkg/session/session.go +++ b/src/pkg/session/session.go @@ -3,6 +3,7 @@ package session import ( "context" "fmt" + "log/slog" "github.com/DefangLabs/defang/src/pkg" "github.com/DefangLabs/defang/src/pkg/cli" @@ -11,7 +12,6 @@ import ( "github.com/DefangLabs/defang/src/pkg/clouds/aws" "github.com/DefangLabs/defang/src/pkg/modes" "github.com/DefangLabs/defang/src/pkg/stacks" - "github.com/DefangLabs/defang/src/pkg/term" ) type StacksManager interface { @@ -67,7 +67,7 @@ func (sl *SessionLoader) LoadSession(ctx context.Context) (*Session, error) { if stack.Provider == client.ProviderDefang { extraMsg = "; consider using BYOC (https://s.defang.io/byoc)" } - term.Infof("Using the %q stack on %s from %s%s", stack.Name, stack.Provider, whence, extraMsg) + slog.InfoContext(ctx, fmt.Sprintf("Using the %q stack on %s from %s%s", stack.Name, stack.Provider, whence, extraMsg)) printProviderMismatchWarnings(ctx, stack.Provider) return session, nil @@ -106,28 +106,28 @@ func printProviderMismatchWarnings(ctx context.Context, provider client.Provider // Ignore any env vars when explicitly using the Defang playground provider // Defaults to defang provider in non-interactive mode if env := pkg.AwsInEnv(); env != "" { - term.Warnf("AWS environment variables were detected (%v); did you forget --provider=aws or DEFANG_PROVIDER=aws?", env) + slog.WarnContext(ctx, fmt.Sprintf("AWS environment variables were detected (%v); did you forget --provider=aws or DEFANG_PROVIDER=aws?", env)) } if env := pkg.DoInEnv(); env != "" { - term.Warnf("DigitalOcean environment variable was detected (%v); did you forget --provider=digitalocean or DEFANG_PROVIDER=digitalocean?", env) + slog.WarnContext(ctx, fmt.Sprintf("DigitalOcean environment variable was detected (%v); did you forget --provider=digitalocean or DEFANG_PROVIDER=digitalocean?", env)) } 
if env := pkg.GcpInEnv(); env != "" { - term.Warnf("GCP project environment variable was detected (%v); did you forget --provider=gcp or DEFANG_PROVIDER=gcp?", env) + slog.WarnContext(ctx, fmt.Sprintf("GCP project environment variable was detected (%v); did you forget --provider=gcp or DEFANG_PROVIDER=gcp?", env)) } } switch provider { case client.ProviderAWS: if !awsInConfig(ctx) { - term.Warn("AWS provider was selected, but AWS environment is not set") + slog.WarnContext(ctx, "AWS provider was selected, but AWS environment is not set") } case client.ProviderDO: if env := pkg.DoInEnv(); env == "" { - term.Warn("DigitalOcean provider was selected, but DIGITALOCEAN_TOKEN environment variable is not set") + slog.WarnContext(ctx, "DigitalOcean provider was selected, but DIGITALOCEAN_TOKEN environment variable is not set") } case client.ProviderGCP: if env := pkg.GcpInEnv(); env == "" { - term.Warnf("GCP provider was selected, but no GCP project environment variable is set (%v)", pkg.GCPProjectEnvVars) + slog.WarnContext(ctx, fmt.Sprintf("GCP provider was selected, but no GCP project environment variable is set (%v)", pkg.GCPProjectEnvVars)) } } } diff --git a/src/pkg/setup/setup.go b/src/pkg/setup/setup.go index 53036ea1b..1629d085f 100644 --- a/src/pkg/setup/setup.go +++ b/src/pkg/setup/setup.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "os" "strings" @@ -144,7 +145,7 @@ func (s *SetupClient) AIGenerate(ctx context.Context) (SetupResult, error) { track.Evt(GenerateStartedEvt, P("language", prompt.Language), P("description", prompt.Description), P("folder", folder), P("model", prompt.ModelID)) beforeGenerate(folder) - term.Info("Working on it. This may take 1 or 2 minutes...") + slog.InfoContext(ctx, "Working on it. 
This may take 1 or 2 minutes...") args := cli.GenerateArgs{ Description: prompt.Description, Folder: folder, @@ -180,7 +181,7 @@ func (s *SetupClient) CloneSample(ctx context.Context, sample string) (SetupResu } track.Evt(GenerateStartedEvt, P("sample", sample), P("folder", folder)) beforeGenerate(folder) - term.Info("Fetching sample from the Defang repository...") + slog.InfoContext(ctx, "Fetching sample from the Defang repository...") err = cli.InitFromSamples(ctx, folder, []string{sample}) if err != nil { return SetupResult{}, err @@ -231,7 +232,7 @@ func (s *SetupClient) MigrateFromHeroku(ctx context.Context) (SetupResult, error return SetupResult{}, err } - term.Info("Ok, let's create a compose file for your existing deployment.") + slog.InfoContext(ctx, "Ok, let's create a compose file for your existing deployment.") heroku := migrate.NewHerokuClient() composeFileContents, err := migrate.InteractiveSetup(ctx, s.Fabric, s.Surveyor, heroku, migrate.SourcePlatformHeroku) if err != nil { @@ -243,9 +244,9 @@ func (s *SetupClient) MigrateFromHeroku(ctx context.Context) (SetupResult, error return SetupResult{}, fmt.Errorf("failed to write compose file: %w", err) } - term.Info("Compose file written to", composeFilePath) - term.Info("Your application is now ready to deploy with Defang.") - term.Info("For next steps, visit https://s.defang.io/from-heroku") + slog.InfoContext(ctx, fmt.Sprint("Compose file written to", composeFilePath)) + slog.InfoContext(ctx, "Your application is now ready to deploy with Defang.") + slog.InfoContext(ctx, "For next steps, visit https://s.defang.io/from-heroku") return SetupResult{Folder: "."}, nil } diff --git a/src/pkg/stacks/manager.go b/src/pkg/stacks/manager.go index ce3d00f57..a4650ddc0 100644 --- a/src/pkg/stacks/manager.go +++ b/src/pkg/stacks/manager.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "os" "path/filepath" "slices" @@ -14,7 +15,6 @@ import ( "github.com/DefangLabs/defang/src/pkg/cli/client" 
"github.com/DefangLabs/defang/src/pkg/elicitations" "github.com/DefangLabs/defang/src/pkg/modes" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/timeutils" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" ) @@ -108,7 +108,7 @@ func (sm *manager) ListRemote(ctx context.Context) ([]ListItem, error) { bytes := stack.GetStackFile() params, err := NewParametersFromContent(name, bytes) if err != nil { - term.Warnf("Skipping invalid remote stack %s: %v\n", name, err) + slog.WarnContext(ctx, fmt.Sprintf("Skipping invalid remote stack %s: %v\n", name, err)) continue } // fill in missing fields from remote stack info @@ -150,7 +150,7 @@ func (sm *manager) Load(ctx context.Context, name string) (*Parameters, error) { params, err := sm.LoadLocal(name) if err != nil { if errors.Is(err, os.ErrNotExist) { - term.Infof("stack file not found, attempting to import from previous deployments: %v", err) + slog.InfoContext(ctx, fmt.Sprintf("stack file not found, attempting to import from previous deployments: %v", err)) return sm.GetRemote(ctx, name) } return nil, err @@ -283,7 +283,7 @@ func (sm *manager) getSpecifiedStack(ctx context.Context, name string) (*Paramet return nil, "", fmt.Errorf("failed to save imported stack %q to local directory: %w", name, err) } if stackFilename != "" { - term.Infof("Stack %q loaded and saved to %q. Add this file to source control.", name, stackFilename) + slog.InfoContext(ctx, fmt.Sprintf("Stack %q loaded and saved to %q. 
Add this file to source control.", name, stackFilename)) } return stack, whence + " and previous deployment", nil } @@ -324,7 +324,7 @@ func (sm *manager) getDefaultStack(ctx context.Context) (*Parameters, string, er return nil, whence, fmt.Errorf("using default stack %q for project %q, but the stack specifies COMPOSE_PROJECT_NAME=%q", res.Stack.Name, sm.projectName, pn) } if cf, ok := params.Variables["COMPOSE_FILE"]; ok { - term.Warnf("Using default stack %q for project %q, but the stack specifies COMPOSE_FILE=%q", res.Stack.Name, sm.projectName, cf) + slog.WarnContext(ctx, fmt.Sprintf("Using default stack %q for project %q, but the stack specifies COMPOSE_FILE=%q", res.Stack.Name, sm.projectName, cf)) } return params, whence, nil } diff --git a/src/pkg/stacks/stacks.go b/src/pkg/stacks/stacks.go index a01afb2f3..8bc9c68c5 100644 --- a/src/pkg/stacks/stacks.go +++ b/src/pkg/stacks/stacks.go @@ -3,6 +3,7 @@ package stacks import ( "errors" "fmt" + "log/slog" "os" "path/filepath" "regexp" @@ -180,13 +181,13 @@ func ListInDirectory(workingDirectory string) ([]ListItem, error) { filename := filename(workingDirectory, file.Name()) content, err := os.ReadFile(filename) if err != nil { - term.Warnf("Skipping unreadable stack file %s: %v\n", filename, err) + slog.Warn(fmt.Sprintf("Skipping unreadable stack file %s: %v\n", filename, err)) continue } params, err := NewParametersFromContent(file.Name(), content) if err != nil { - term.Warnf("Skipping invalid stack file %s: %v\n", filename, err) + slog.Warn(fmt.Sprintf("Skipping invalid stack file %s: %v\n", filename, err)) continue } stacks = append(stacks, ListItem{ @@ -245,7 +246,7 @@ func LoadStackEnv(params Parameters, overload bool) error { paramsMap := params.ToMap() for key, value := range paramsMap { if envValue, ok := currentEnv[key]; ok && envValue != value && !overload { - term.Warnf("The variable %q is set in both the stack and the environment. 
The value from the environment will be used.\n", key) + slog.Warn(fmt.Sprintf("The variable %q is set in both the stack and the environment. The value from the environment will be used.\n", key)) } if _, ok := currentEnv[key]; !ok || overload { err := os.Setenv(key, value) @@ -263,7 +264,7 @@ func filename(workingDirectory, stackname string) string { } func PrintCreateMessage(stackName string) { - term.Infof("A stack file has been created at `.defang/%s`.", stackName) + slog.Info(fmt.Sprintf("A stack file has been created at `.defang/%s`.", stackName)) term.Printf( "This file contains the configuration for this stack.\n"+ "We recommend you commit this file to source control, so it can be used by everyone on your team.\n"+ diff --git a/src/pkg/term/colorizer.go b/src/pkg/term/colorizer.go index b3bea0775..aa573c4f7 100644 --- a/src/pkg/term/colorizer.go +++ b/src/pkg/term/colorizer.go @@ -261,6 +261,31 @@ func (t *Term) Errorf(format string, v ...any) (int, error) { return output(t.err, ErrorColor, line) } +// WriteDebug writes a pre-formatted debug message (newline added if missing). +func (t *Term) WriteDebug(msg string) (int, error) { + if !t.DoDebug() { + return 0, nil + } + return output(t.err, DebugColor, ensurePrefix(debugPrefix, ensureNewline(msg))) +} + +// WriteInfo writes a pre-formatted info message (newline added if missing). +func (t *Term) WriteInfo(msg string) (int, error) { + return output(t.outOrErr(), InfoColor, ensurePrefix(infoPrefix, ensureNewline(msg))) +} + +// WriteWarn writes a pre-formatted warning message (newline added if missing) and accumulates it. +func (t *Term) WriteWarn(msg string) (int, error) { + msg = ensurePrefix(warnPrefix, ensureNewline(msg)) + t.warnings = append(t.warnings, msg) + return output(t.outOrErr(), WarnColor, msg) +} + +// WriteError writes a pre-formatted error message (newline added if missing). 
+func (t *Term) WriteError(msg string) (int, error) { + return output(t.err, ErrorColor, ensureNewline(msg)) +} + // Deprecated: use proper error handling instead func (t *Term) Fatal(msg any) { Error("Error:", msg) diff --git a/src/pkg/tokenstore/store.go b/src/pkg/tokenstore/store.go index 571f93870..e47383670 100644 --- a/src/pkg/tokenstore/store.go +++ b/src/pkg/tokenstore/store.go @@ -3,12 +3,11 @@ package tokenstore import ( "errors" "fmt" + "log/slog" "os" "path/filepath" "strings" "sync" - - "github.com/DefangLabs/defang/src/pkg/term" ) type TokenStore interface { @@ -33,7 +32,7 @@ func (s *LocalDirTokenStore) Save(key string, token string) error { return err } - term.Debug("Saving access token to", tokenFile) + slog.Debug(fmt.Sprint("Saving access token to", tokenFile)) dir, _ := filepath.Split(tokenFile) if err := os.MkdirAll(dir, 0700); err != nil { return fmt.Errorf("failed to create token directory: %w", err) @@ -51,7 +50,7 @@ func (s *LocalDirTokenStore) Load(key string) (string, error) { if err != nil { return "", err } - term.Debug("Reading access token from file", tokenFile) + slog.Debug(fmt.Sprint("Reading access token from file", tokenFile)) all, err := os.ReadFile(tokenFile) if err != nil { return "", fmt.Errorf("failed to read token: %w", err) @@ -59,6 +58,14 @@ func (s *LocalDirTokenStore) Load(key string) (string, error) { return string(all), nil } +func isWithinBase(baseDir, target string) bool { + rel, err := filepath.Rel(baseDir, target) + if err != nil { + return false + } + return rel == "." || (rel != ".." 
&& !strings.HasPrefix(rel, ".."+string(os.PathSeparator)) && !filepath.IsAbs(rel)) +} + func (s *LocalDirTokenStore) List(prefix string) ([]string, error) { s.mu.RLock() defer s.mu.RUnlock() @@ -79,8 +86,8 @@ func (s *LocalDirTokenStore) List(prefix string) ([]string, error) { if err != nil { return nil, fmt.Errorf("failed to resolve token store directory: %w", err) } - if !strings.HasPrefix(dir, baseDir) { - term.Warnf("Invalid token prefix %q: resolved directory %q is outside of token store base directory %q", prefix, dir, baseDir) + if !isWithinBase(baseDir, dir) { + slog.Warn(fmt.Sprintf("Invalid token prefix %q: resolved directory %q is outside of token store base directory %q", prefix, dir, baseDir)) return nil, errors.New("invalid token prefix") } @@ -110,7 +117,7 @@ func (s *LocalDirTokenStore) Delete(key string) error { if err := os.Remove(tokenFile); err != nil && !errors.Is(err, os.ErrNotExist) { return fmt.Errorf("failed to delete token: %w", err) } - term.Debug("Removed token file:", tokenFile) + slog.Debug("Removed token file: " + tokenFile) return nil } @@ -130,7 +137,7 @@ func (s *LocalDirTokenStore) getTokenFile(key string) (string, error) { if err != nil { return "", fmt.Errorf("failed to resolve token store directory: %w", err) } - if !strings.HasPrefix(absTokenFilePath, absDir) { + if !isWithinBase(absDir, absTokenFilePath) { return "", errors.New("invalid token key") } return absTokenFilePath, nil diff --git a/src/pkg/track/track.go b/src/pkg/track/track.go index 088cc21fe..dc63f5be9 100644 --- a/src/pkg/track/track.go +++ b/src/pkg/track/track.go @@ -1,12 +1,12 @@ package track import ( + "log/slog" "strings" "sync" "github.com/DefangLabs/defang/src/pkg" "github.com/DefangLabs/defang/src/pkg/cli/client" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/spf13/cobra" "github.com/spf13/pflag" ) @@ -40,10 +40,10 @@ func Evt(name string, props ...Property) { } tracker := Tracker if tracker == nil { - term.Debugf("untracked event %q: %v", 
name, props) + slog.Debug("untracked event", "name", name, "props", props) return } - term.Debugf("tracking event %q: %v", name, props) + slog.Debug("tracking event", "name", name, "props", props) trackWG.Add(1) go func() { defer trackWG.Done()