From a49e843c850be23633b89a8a1e996e85104f5094 Mon Sep 17 00:00:00 2001 From: jordanstephens Date: Fri, 17 Apr 2026 17:00:55 -0700 Subject: [PATCH 1/7] fix: migrate from term to slog in tests Migrated test infrastructure from term to slog to support the slog migration in the CLI. Tests that mock term.DefaultTerm now need to also update the slog default logger to capture output for assertions. Key changes in test files: - Added slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) after replacing term.DefaultTerm to ensure slog output goes to the mock term buffer - Added required imports: log/slog and github.com/DefangLabs/defang/src/pkg/logs - In validation_test.go, renamed 'logs' variable to 'logBuf' to avoid shadowing the logs package Files modified: - cmd/cli/command/compose_test.go - cmd/cli/command/stack_test.go (MockTerm helper) - pkg/cli/client/byoc/aws/byoc_test.go - pkg/cli/compose/compose_test.go - pkg/cli/compose/serviceNameReplacer_test.go - pkg/cli/compose/validation_test.go - pkg/cli/configList_test.go - pkg/cli/configResolution_test.go - pkg/cli/deploymentsList_test.go - pkg/cli/getServices_test.go - pkg/cli/tail_test.go (also fixed TestTail assertion for variable trailing spaces) Also fixed: - Added mutex to termHandler in slog.go to protect against data races when multiple goroutines write concurrently (e.g., in TailAndMonitor) - Fixed TestTail assertion to handle variable number of trailing spaces when clearing the reconnection message (depends on terminal width) --- src/cmd/cli/command/commands.go | 11 ++- src/cmd/cli/command/compose.go | 55 +++++------ src/cmd/cli/command/compose_test.go | 3 + src/cmd/cli/command/config.go | 11 ++- src/cmd/cli/command/estimate.go | 3 +- src/cmd/cli/command/generate.go | 9 +- src/cmd/cli/command/globals.go | 10 +- src/cmd/cli/command/login.go | 5 +- src/cmd/cli/command/logout.go | 5 +- src/cmd/cli/command/mcp.go | 17 ++-- src/cmd/cli/command/session.go | 15 +-- src/cmd/cli/command/stack.go | 9 +- 
src/cmd/cli/command/stack_test.go | 3 + src/cmd/cli/command/whoami.go | 7 +- src/cmd/cli/command/workspace.go | 3 +- src/pkg/agent/common/common.go | 14 +-- src/pkg/agent/generator.go | 5 +- src/pkg/agent/plugins/compat_oai/generate.go | 4 +- src/pkg/agent/toolmanager.go | 4 +- src/pkg/agent/tools/deploy.go | 10 +- src/pkg/agent/tools/destroy.go | 8 +- src/pkg/agent/tools/estimate.go | 10 +- src/pkg/agent/tools/listConfig.go | 10 +- src/pkg/agent/tools/logs.go | 10 +- src/pkg/agent/tools/provider.go | 4 +- src/pkg/agent/tools/removeConfig.go | 6 +- src/pkg/agent/tools/services.go | 10 +- src/pkg/agent/tools/setConfig.go | 10 +- src/pkg/auth/auth.go | 11 ++- src/pkg/cli/cd.go | 19 ++-- src/pkg/cli/cert.go | 55 +++++------ src/pkg/cli/client/byoc/aws/alb_logs.go | 4 +- src/pkg/cli/client/byoc/aws/byoc.go | 63 ++++++------- src/pkg/cli/client/byoc/aws/byoc_test.go | 2 + src/pkg/cli/client/byoc/aws/domain.go | 21 +++-- src/pkg/cli/client/byoc/aws/list.go | 9 +- src/pkg/cli/client/byoc/aws/stream.go | 7 +- src/pkg/cli/client/byoc/aws/subscribe.go | 5 +- src/pkg/cli/client/byoc/baseclient.go | 4 +- src/pkg/cli/client/byoc/common.go | 9 +- src/pkg/cli/client/byoc/do/byoc.go | 13 +-- src/pkg/cli/client/byoc/gcp/byoc.go | 31 ++++--- src/pkg/cli/client/byoc/gcp/stream.go | 34 +++---- src/pkg/cli/client/byoc/state/parse.go | 6 +- src/pkg/cli/client/caniuse.go | 19 ++-- src/pkg/cli/client/cluster.go | 9 +- src/pkg/cli/client/grpc_logger.go | 5 +- src/pkg/cli/client/playground.go | 5 +- src/pkg/cli/client/pretty_error.go | 4 +- src/pkg/cli/client/projectName.go | 7 +- src/pkg/cli/common.go | 3 +- src/pkg/cli/compose/baseimage.go | 4 +- src/pkg/cli/compose/compose_test.go | 3 + src/pkg/cli/compose/context.go | 21 +++-- src/pkg/cli/compose/dockerfile_validator.go | 8 +- src/pkg/cli/compose/fixup.go | 46 +++++----- src/pkg/cli/compose/loader.go | 9 +- src/pkg/cli/compose/serviceNameReplacer.go | 11 ++- .../cli/compose/serviceNameReplacer_test.go | 3 + 
src/pkg/cli/compose/validation.go | 92 +++++++++---------- src/pkg/cli/compose/validation_test.go | 19 ++-- src/pkg/cli/composeDown.go | 6 +- src/pkg/cli/composeUp.go | 13 +-- src/pkg/cli/configDelete.go | 5 +- src/pkg/cli/configList.go | 8 +- src/pkg/cli/configList_test.go | 4 + src/pkg/cli/configResolution.go | 3 +- src/pkg/cli/configResolution_test.go | 4 + src/pkg/cli/configSet.go | 4 +- src/pkg/cli/connect.go | 9 +- src/pkg/cli/deploymentsList.go | 9 +- src/pkg/cli/deploymentsList_test.go | 6 ++ src/pkg/cli/estimate.go | 9 +- src/pkg/cli/generate.go | 5 +- src/pkg/cli/getServices.go | 13 +-- src/pkg/cli/getServices_test.go | 3 + src/pkg/cli/install_cd.go | 4 +- src/pkg/cli/logout.go | 11 ++- src/pkg/cli/new.go | 9 +- src/pkg/cli/subscribe.go | 15 +-- src/pkg/cli/tail.go | 30 +++--- src/pkg/cli/tailAndMonitor.go | 13 +-- src/pkg/cli/tail_test.go | 19 +++- src/pkg/cli/teardown_cd.go | 6 +- src/pkg/cli/token.go | 6 +- src/pkg/cli/upgrade.go | 8 +- src/pkg/cli/waitForCdTaskExit.go | 2 +- src/pkg/cli/whoami.go | 7 +- src/pkg/clouds/aws/codebuild/cfn/setup.go | 10 +- src/pkg/clouds/aws/login.go | 49 +++++----- src/pkg/clouds/do/appPlatform/setup.go | 10 +- src/pkg/clouds/gcp/api.go | 9 +- src/pkg/clouds/gcp/iam.go | 34 +++---- src/pkg/clouds/gcp/logging.go | 8 +- src/pkg/clouds/gcp/login.go | 43 ++++----- src/pkg/clouds/gcp/storage.go | 6 +- src/pkg/debug/debug_test.go | 4 +- src/pkg/dockerhub/dockerhub.go | 4 +- src/pkg/github/version.go | 6 +- src/pkg/login/agree_tos.go | 8 +- src/pkg/login/login.go | 19 ++-- src/pkg/logs/slog.go | 75 +++++++++------ src/pkg/mcp/mcp_server.go | 8 +- src/pkg/mcp/resources/resources.go | 10 +- src/pkg/mcp/setup.go | 9 +- src/pkg/mcp/utils.go | 32 +++---- src/pkg/migrate/heroku.go | 44 ++++----- src/pkg/migrate/migrate.go | 20 ++-- src/pkg/migrate/platform.go | 4 +- src/pkg/session/session.go | 16 ++-- src/pkg/setup/setup.go | 13 +-- src/pkg/stacks/manager.go | 10 +- src/pkg/stacks/stacks.go | 9 +- src/pkg/term/colorizer.go | 25 
+++++ src/pkg/tokenstore/store.go | 11 +-- src/pkg/track/track.go | 7 +- 116 files changed, 858 insertions(+), 708 deletions(-) diff --git a/src/cmd/cli/command/commands.go b/src/cmd/cli/command/commands.go index 0cb625658..c125fc803 100644 --- a/src/cmd/cli/command/commands.go +++ b/src/cmd/cli/command/commands.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "os" "regexp" "strings" @@ -49,7 +50,7 @@ func Execute(ctx context.Context) error { if err := RootCmd.ExecuteContext(ctx); err != nil { if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) { - term.Error("Error:", client.PrettyError(err)) + slog.Error(fmt.Sprintln("Error:", client.PrettyError(err))) track.Evt("CLI Error", P("err", err)) } @@ -106,7 +107,7 @@ func Execute(ctx context.Context) error { if global.HasTty && !global.HideUpdate && pkg.RandomIndex(10) == 0 { if latest, err := github.GetLatestReleaseTag(ctx); err == nil && isNewer(GetCurrentVersion(), latest) { - term.Debug("Latest Version:", latest, "Current Version:", GetCurrentVersion()) + slog.Debug(fmt.Sprintln("Latest Version:", latest, "Current Version:", GetCurrentVersion())) term.Println("A newer version of the CLI is available at https://github.com/DefangLabs/defang/releases/latest") if pkg.RandomIndex(10) == 0 && !pkg.GetenvBool("DEFANG_HIDE_HINTS") { term.Println("To silence these notices, do: export DEFANG_HIDE_UPDATE=1") @@ -409,16 +410,16 @@ var RootCmd = &cobra.Command{ if connect.CodeOf(err) != connect.CodeUnauthenticated { return err } - term.Debug("Using existing token failed; continuing to allow login/ToS flow:", err) + slog.Debug(fmt.Sprintln("Using existing token failed; continuing to allow login/ToS flow:", err)) } track.Tracker = global.Client // update tracker with the real client if v, err := global.Client.GetVersions(ctx); err == nil { version := cmd.Root().Version // HACK to avoid circular dependency with RootCmd - term.Debug("Fabric:", v.Fabric, "CLI:", version, "CLI-Min:", 
v.CliMin) + slog.Debug(fmt.Sprintln("Fabric:", v.Fabric, "CLI:", version, "CLI-Min:", v.CliMin)) if global.HasTty && isNewer(version, v.CliMin) && !isUpgradeCommand(cmd) { - term.Warn("Your CLI version is outdated. Please upgrade to the latest version by running:\n\n defang upgrade\n") + slog.Warn("Your CLI version is outdated. Please upgrade to the latest version by running:\n\n defang upgrade\n") global.HideUpdate = true // hide the upgrade hint at the end } } diff --git a/src/cmd/cli/command/compose.go b/src/cmd/cli/command/compose.go index 92a82f34e..e9b0864a4 100644 --- a/src/cmd/cli/command/compose.go +++ b/src/cmd/cli/command/compose.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "io" + "log/slog" "slices" "strings" "time" @@ -35,7 +36,7 @@ const SERVICE_PORTAL_URL = "https://" + DEFANG_PORTAL_HOST + "/service" func printPlaygroundPortalServiceURLs(serviceInfos []*defangv1.ServiceInfo) { // We can only show services deployed to the prod1 defang SaaS environment. if global.Stack.Provider == client.ProviderDefang && global.FabricAddr == client.DefaultFabricAddr { - term.Info("Monitor your services' status in the defang portal") + slog.Info("Monitor your services' status in the defang portal") for _, serviceInfo := range serviceInfos { term.Println(" -", SERVICE_PORTAL_URL+"/"+serviceInfo.Service.Name) } @@ -94,9 +95,9 @@ func makeComposeUpCmd() *cobra.Command { Type: defangv1.DeploymentType_DEPLOYMENT_TYPE_ACTIVE, Stack: session.Stack.Name, }); err != nil { - term.Debugf("ListDeployments failed: %v", err) + slog.Debug(fmt.Sprintf("ListDeployments failed: %v", err)) } else if accountInfo, err := session.Provider.AccountInfo(ctx); err != nil { - term.Debugf("AccountInfo failed: %v", err) + slog.Debug(fmt.Sprintf("AccountInfo failed: %v", err)) } else if len(resp.Deployments) > 0 { workingDir, _ := session.Loader.ProjectWorkingDir(ctx) confirmed, err := confirmDeployment(workingDir, resp.Deployments, accountInfo, session.Provider.GetStackName()) @@ -115,7 +116,7 @@ 
func makeComposeUpCmd() *cobra.Command { Mode: session.Stack.Mode, }) if err != nil { - term.Debug("Failed to create stack:", err) + slog.Debug(fmt.Sprintln("Failed to create stack:", err)) } } @@ -127,7 +128,7 @@ func makeComposeUpCmd() *cobra.Command { } } if len(managedServices) > 0 { - term.Warnf("Defang cannot monitor status of the following managed service(s): %v.\n To check if the managed service is up, check the status of the service which depends on it.", managedServices) + slog.Warn(fmt.Sprintf("Defang cannot monitor status of the following managed service(s): %v.\n To check if the managed service is up, check the status of the service which depends on it.", managedServices)) } deploy, project, err := cli.ComposeUp(ctx, global.Client, session.Provider, session.Stack, cli.ComposeUpParams{ @@ -151,7 +152,7 @@ func makeComposeUpCmd() *cobra.Command { printPlaygroundPortalServiceURLs(deploy.Services) if detach { - term.Info("Detached.") + slog.Info("Detached.") return nil } @@ -160,7 +161,7 @@ func makeComposeUpCmd() *cobra.Command { if deploy.Etag != "" { tailSource = "deployment ID " + deploy.Etag } - term.Info("Tailing logs for", tailSource, "; press Ctrl+C to detach:") + slog.Info(fmt.Sprintln("Tailing logs for", tailSource, "; press Ctrl+C to detach:")) tailOptions := newTailOptionsForDeploy(session.Stack.Name, deploy.Etag, since, global.Verbose) serviceStates, err := cli.TailAndMonitor(ctx, project, session.Provider, time.Duration(waitTimeout)*time.Second, tailOptions) @@ -168,7 +169,7 @@ func makeComposeUpCmd() *cobra.Command { deploymentErr := err debugger, err := debug.NewDebugger(ctx, global.FabricAddr, session.Stack) if err != nil { - term.Warn("Failed to initialize debugger:", err) + slog.Warn(fmt.Sprintln("Failed to initialize debugger:", err)) return deploymentErr } handleTailAndMonitorErr(ctx, deploymentErr, debugger, debug.DebugConfig{ @@ -197,7 +198,7 @@ func makeComposeUpCmd() *cobra.Command { return err } - term.Info("Done.") + 
slog.Info("Done.") flushWarnings() return nil }, @@ -246,7 +247,7 @@ func confirmDeployment(targetDirectory string, existingDeployments []*defangv1.D Mode: global.Stack.Mode, }) if err != nil { - term.Debugf("Failed to create stack %v", err) + slog.Debug(fmt.Sprintf("Failed to create stack %v", err)) } else { stacks.PrintCreateMessage(stackName) } @@ -255,7 +256,7 @@ func confirmDeployment(targetDirectory string, existingDeployments []*defangv1.D } func printExistingDeployments(existingDeployments []*defangv1.Deployment) { - term.Info("This project was previously deployed to the following locations:") + slog.Info("This project was previously deployed to the following locations:") deploymentStrings := make([]string, 0, len(existingDeployments)) for _, dep := range existingDeployments { var providerId client.ProviderID @@ -283,7 +284,7 @@ func confirmDeploymentToNewLocation() (bool, error) { func promptToCreateStack(ctx context.Context, targetDirectory string, params stacks.Parameters) error { if global.NonInteractive { - term.Info("Consider creating a stack to manage your deployments.") + slog.Info("Consider creating a stack to manage your deployments.") printDefangHint("To create a stack, do:", "stack new --name="+params.Name) return nil } @@ -310,7 +311,7 @@ func handleComposeUpErr(ctx context.Context, debugger *debug.Debugger, project * } if connect.CodeOf(originalErr) == connect.CodeResourceExhausted && strings.Contains(originalErr.Error(), "maximum number of projects") { - term.Error("Error:", client.PrettyError(originalErr)) + slog.Error(fmt.Sprintln("Error:", client.PrettyError(originalErr))) err := handleTooManyProjectsError(ctx, provider, originalErr) if err != nil { return originalErr @@ -322,7 +323,7 @@ func handleComposeUpErr(ctx context.Context, debugger *debug.Debugger, project * return originalErr } - term.Error("Error:", client.PrettyError(originalErr)) + slog.Error(fmt.Sprintln("Error:", client.PrettyError(originalErr))) return 
debugger.DebugDeploymentError(ctx, debug.DebugConfig{ Project: project, }, originalErr) @@ -331,7 +332,7 @@ func handleComposeUpErr(ctx context.Context, debugger *debug.Debugger, project * func handleTooManyProjectsError(ctx context.Context, provider client.Provider, originalErr error) error { projectName, err := provider.RemoteProjectName(ctx) if err != nil { - term.Warn("failed to get remote project name:", err) + slog.Warn(fmt.Sprintln("failed to get remote project name:", err)) return originalErr } @@ -343,7 +344,7 @@ func handleTooManyProjectsError(ctx context.Context, provider client.Provider, o _, err = cli.InteractiveComposeDown(ctx, projectName, global.Client, provider) if err != nil { - term.Warn("ComposeDown failed:", err) + slog.Warn(fmt.Sprintln("ComposeDown failed:", err)) printDefangHint("To deactivate a project, do:", "compose down --project-name "+projectName) return originalErr } else { @@ -358,7 +359,7 @@ func handleTailAndMonitorErr(ctx context.Context, err error, debugger *debug.Deb var errDeploymentFailed client.ErrDeploymentFailed if errors.As(err, &errDeploymentFailed) { // Tail got canceled because of deployment failure: prompt to show the debugger - term.Warn(errDeploymentFailed) + slog.Warn(fmt.Sprintf("%v", errDeploymentFailed)) if errDeploymentFailed.Service != "" { debugConfig.FailedServices = []string{errDeploymentFailed.Service} } @@ -437,21 +438,21 @@ func makeComposeDownCmd() *cobra.Command { if err != nil { if connect.CodeOf(err) == connect.CodeNotFound { // Show a warning (not an error) if the service was not found - term.Warn(client.PrettyError(err)) + slog.Warn(fmt.Sprintf("%v", client.PrettyError(err))) return nil } return err } - term.Info("Deleted services, deployment ID", deployment) + slog.Info(fmt.Sprintln("Deleted services, deployment ID", deployment)) listConfigs, err := session.Provider.ListConfig(cmd.Context(), &defangv1.ListConfigsRequest{Project: projectName}) if err == nil { if len(listConfigs.Names) > 0 { - 
term.Warn("Stored project configs are not deleted.") + slog.Warn("Stored project configs are not deleted.") } } else { - term.Debugf("ListConfigs failed: %v", err) + slog.Debug(fmt.Sprintf("ListConfigs failed: %v", err)) } if detach { @@ -468,12 +469,12 @@ func makeComposeDownCmd() *cobra.Command { // different than `up`, which will wait for the deployment to finish, but we don't have an // ECS event subscription for `down` so we can't wait for the deployment to finish. // Instead, we'll just show a warning and detach. - term.Warn("Unable to tail logs. Detaching.") + slog.Warn("Unable to tail logs. Detaching.") return nil } return err } - term.Info("Done.") + slog.Info("Done.") if len(listConfigs.Names) > 0 { printDefangHint("To delete stored project configs, run:", "config rm --project-name="+projectName+" "+strings.Join(listConfigs.Names, " ")) } @@ -520,7 +521,7 @@ func makeComposeConfigCmd() *cobra.Command { CheckAccountInfo: false, }) if err != nil { - term.Warn("unable to load stack:", err, "- some information may not be up-to-date") + slog.Warn(fmt.Sprintln("unable to load stack:", err, "- some information may not be up-to-date")) sessionx = &session.Session{ Loader: configureLoaderForCommand(cmd), Provider: client.NewPlaygroundProvider(global.Client, stacks.DefaultBeta), @@ -530,7 +531,7 @@ func makeComposeConfigCmd() *cobra.Command { _, err = sessionx.Provider.AccountInfo(ctx) if err != nil { - term.Warn("unable to connect to cloud provider:", err, "- some information may not be up-to-date") + slog.Warn(fmt.Sprintln("unable to connect to cloud provider:", err, "- some information may not be up-to-date")) } project, loadErr := sessionx.Loader.LoadProject(ctx) @@ -580,7 +581,7 @@ func makeComposePsCmd() *cobra.Command { return err } - term.Warn(err) + slog.Warn(fmt.Sprintf("%v", err)) printDefangHint("To start a new project, do:", "new") return nil } @@ -676,7 +677,7 @@ func handleLogsCmd(cmd *cobra.Command, args []string) error { if pkg.IsValidTime(untilTs) 
{ rangeStr += " until " + untilTs.Format(time.RFC3339Nano) } - term.Infof("Showing logs%s; press Ctrl+C to stop:", rangeStr) + slog.Info(fmt.Sprintf("Showing logs%s; press Ctrl+C to stop:", rangeStr)) services := args if len(name) > 0 { diff --git a/src/cmd/cli/command/compose_test.go b/src/cmd/cli/command/compose_test.go index ccd324664..c1b8c4d71 100644 --- a/src/cmd/cli/command/compose_test.go +++ b/src/cmd/cli/command/compose_test.go @@ -3,11 +3,13 @@ package command import ( "bytes" "context" + "log/slog" "os" "testing" "connectrpc.com/connect" "github.com/DefangLabs/defang/src/pkg/cli/client" + "github.com/DefangLabs/defang/src/pkg/logs" "github.com/DefangLabs/defang/src/pkg/term" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" ) @@ -31,6 +33,7 @@ func TestPrintPlaygroundPortalServiceURLs(t *testing.T) { var stdout, stderr bytes.Buffer term.DefaultTerm = term.NewTerm(os.Stdin, &stdout, &stderr) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) global.Stack.Provider = client.ProviderDefang global.FabricAddr = client.DefaultFabricAddr diff --git a/src/cmd/cli/command/config.go b/src/cmd/cli/command/config.go index b779112e4..5d21e1763 100644 --- a/src/cmd/cli/command/config.go +++ b/src/cmd/cli/command/config.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "io" + "log/slog" "os" "strings" @@ -164,13 +165,13 @@ var configSetCmd = &cobra.Command{ if err != nil { errs = append(errs, err) } else if ifNotSet && !didSet { - term.Info("Config", name, "is already set; skipping due to --if-not-set flag") + slog.Info(fmt.Sprintln("Config", name, "is already set; skipping due to --if-not-set flag")) } else { - term.Info("Updated value for", name) + slog.Info(fmt.Sprintln("Updated value for", name)) } } - term.Infof("Successfully set %d config value(s)", len(envMap)-len(errs)) + slog.Info(fmt.Sprintf("Successfully set %d config value(s)", len(envMap)-len(errs))) printDefangHint("To update the deployed values, do:", "compose up") return errors.Join(errs...) 
@@ -197,12 +198,12 @@ var configDeleteCmd = &cobra.Command{ if err := cli.ConfigDelete(cmd.Context(), projectName, session.Provider, names...); err != nil { // Show a warning (not an error) if the config was not found if connect.CodeOf(err) == connect.CodeNotFound { - term.Warn(client.PrettyError(err)) + slog.Warn(fmt.Sprintf("%v", client.PrettyError(err))) return nil } return err } - term.Info("Deleted", names) + slog.Info(fmt.Sprintln("Deleted", names)) printDefangHint("To list the configs (but not their values), do:", "config ls") return nil diff --git a/src/cmd/cli/command/estimate.go b/src/cmd/cli/command/estimate.go index c81911c30..53f0e0d01 100644 --- a/src/cmd/cli/command/estimate.go +++ b/src/cmd/cli/command/estimate.go @@ -2,6 +2,7 @@ package command import ( "fmt" + "log/slog" "github.com/AlecAivazis/survey/v2" "github.com/DefangLabs/defang/src/pkg" @@ -54,7 +55,7 @@ func makeEstimateCmd() *cobra.Command { if err != nil { return fmt.Errorf("failed to run estimate: %w", err) } - term.Debugf("Estimate: %+v", estimate) + slog.Debug(fmt.Sprintf("Estimate: %+v", estimate)) cli.PrintEstimate(global.Stack.Mode, estimate, term.DefaultTerm) diff --git a/src/cmd/cli/command/generate.go b/src/cmd/cli/command/generate.go index aabef1818..68828ec85 100644 --- a/src/cmd/cli/command/generate.go +++ b/src/cmd/cli/command/generate.go @@ -3,6 +3,8 @@ package command import ( "context" "errors" + "fmt" + "log/slog" "os/exec" "path/filepath" @@ -11,7 +13,6 @@ import ( "github.com/DefangLabs/defang/src/pkg/cli/compose" "github.com/DefangLabs/defang/src/pkg/setup" "github.com/DefangLabs/defang/src/pkg/surveyor" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/spf13/cobra" ) @@ -51,12 +52,12 @@ var generateCmd = &cobra.Command{ } func afterGenerate(ctx context.Context, result setup.SetupResult) { - term.Info("Code generated successfully in folder", result.Folder) + slog.Info(fmt.Sprintln("Code generated successfully in folder", result.Folder)) editor := 
pkg.Getenv("DEFANG_EDITOR", "code") // TODO: should we use EDITOR env var instead? But won't handle terminal editors like vim cmdd := exec.Command(editor, result.Folder) err := cmdd.Start() if err != nil { - term.Debugf("unable to launch editor %q: %v", editor, err) + slog.Debug(fmt.Sprintf("unable to launch editor %q: %v", editor, err)) } cd := "" @@ -68,7 +69,7 @@ func afterGenerate(ctx context.Context, result setup.SetupResult) { loader := compose.NewLoader(compose.WithPath(filepath.Join(result.Folder, "compose.yaml"))) project, err := loader.LoadProject(ctx) if err != nil { - term.Debugf("unable to load new project: %v", err) + slog.Debug(fmt.Sprintf("unable to load new project: %v", err)) } var envInstructions []string diff --git a/src/cmd/cli/command/globals.go b/src/cmd/cli/command/globals.go index cf1b88ed6..6d6b3353e 100644 --- a/src/cmd/cli/command/globals.go +++ b/src/cmd/cli/command/globals.go @@ -1,6 +1,8 @@ package command import ( + "fmt" + "log/slog" "os" "strconv" @@ -95,7 +97,7 @@ func NewGlobalConfig() *GlobalConfig { if fromEnv, ok := os.LookupEnv("DEFANG_COLOR"); ok { err := color.Set(fromEnv) if err != nil { - term.Debugf("invalid DEFANG_COLOR value: %v", err) + slog.Debug(fmt.Sprintf("invalid DEFANG_COLOR value: %v", err)) } } @@ -103,7 +105,7 @@ func NewGlobalConfig() *GlobalConfig { if fromEnv, ok := os.LookupEnv("DEFANG_PROVIDER"); ok { err := provider.Set(fromEnv) if err != nil { - term.Debugf("invalid DEFANG_PROVIDER value: %v", err) + slog.Debug(fmt.Sprintf("invalid DEFANG_PROVIDER value: %v", err)) } } @@ -111,7 +113,7 @@ func NewGlobalConfig() *GlobalConfig { if fromEnv, ok := os.LookupEnv("DEFANG_MODE"); ok { err := mode.Set(fromEnv) if err != nil { - term.Debugf("invalid DEFANG_MODE value: %v", err) + slog.Debug(fmt.Sprintf("invalid DEFANG_MODE value: %v", err)) } } @@ -123,7 +125,7 @@ func NewGlobalConfig() *GlobalConfig { tenant = types.TenantNameOrID(fromEnv) } else if fromEnv, ok := os.LookupEnv("DEFANG_ORG"); ok { tenant = 
types.TenantNameOrID(fromEnv) - term.Warn("DEFANG_ORG is deprecated; use DEFANG_WORKSPACE instead") + slog.Warn("DEFANG_ORG is deprecated; use DEFANG_WORKSPACE instead") } return &GlobalConfig{ diff --git a/src/cmd/cli/command/login.go b/src/cmd/cli/command/login.go index 31da0c32b..f0fd80afa 100644 --- a/src/cmd/cli/command/login.go +++ b/src/cmd/cli/command/login.go @@ -1,8 +1,9 @@ package command import ( + "log/slog" + "github.com/DefangLabs/defang/src/pkg/login" - "github.com/DefangLabs/defang/src/pkg/term" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" "github.com/spf13/cobra" ) @@ -32,7 +33,7 @@ var loginCmd = &cobra.Command{ if err := global.Client.SetOptions(cmd.Context(), req); err != nil { return err } - term.Info("Options updated successfully") + slog.Info("Options updated successfully") } return nil }, diff --git a/src/cmd/cli/command/logout.go b/src/cmd/cli/command/logout.go index 3c3ca3c12..13caea704 100644 --- a/src/cmd/cli/command/logout.go +++ b/src/cmd/cli/command/logout.go @@ -1,8 +1,9 @@ package command import ( + "log/slog" + "github.com/DefangLabs/defang/src/pkg/cli" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/spf13/cobra" ) @@ -15,7 +16,7 @@ var logoutCmd = &cobra.Command{ if err := cli.Logout(cmd.Context(), global.Client, global.FabricAddr); err != nil { return err } - term.Info("Successfully logged out") + slog.Info("Successfully logged out") return nil }, } diff --git a/src/cmd/cli/command/mcp.go b/src/cmd/cli/command/mcp.go index a439491a5..72b5fc889 100644 --- a/src/cmd/cli/command/mcp.go +++ b/src/cmd/cli/command/mcp.go @@ -2,6 +2,7 @@ package command import ( "fmt" + "log/slog" "os" "path/filepath" @@ -32,14 +33,14 @@ var mcpServerCmd = &cobra.Command{ mcpClient, err := mcp.ParseMCPClient(ideClient) if err != nil { - term.Warnf("Unable to parse MCP client: %v", err) + slog.Warn(fmt.Sprintf("Unable to parse MCP client: %v", err)) mcpClient = mcp.MCPClientUnspecified } - term.Debug("Creating log file") + 
slog.Debug("Creating log file") logFile, err := os.OpenFile(filepath.Join(client.StateDir, "defang-mcp.log"), os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600) if err != nil { - term.Warnf("Failed to open log file: %v", err) + slog.Warn(fmt.Sprintf("Failed to open log file: %v", err)) } else { defer logFile.Close() term.DefaultTerm = term.NewTerm(os.Stdin, logFile, logFile) @@ -47,7 +48,7 @@ var mcpServerCmd = &cobra.Command{ } // Create a new MCP server - term.Debug("Creating MCP server") + slog.Debug("Creating MCP server") s, err := mcp.NewDefangMCPServer(RootCmd.Version, mcpClient, tools.DefaultToolCLI{}, mcp.StackConfig{ FabricAddr: global.FabricAddr, Stack: &global.Stack, @@ -73,7 +74,7 @@ var mcpSetupCmd = &cobra.Command{ Short: "Setup MCP client for defang MCP server", Args: cobra.NoArgs, RunE: func(cmd *cobra.Command, args []string) error { - term.Debug("Setting up MCP client") + slog.Debug("Setting up MCP client") client, _ := cmd.Flags().GetString("client") if client != "" { @@ -87,18 +88,18 @@ var mcpSetupCmd = &cobra.Command{ client = string(mcp.MCPClientWindsurf) } - term.Debugf("Using MCP client flag: %q", client) + slog.Debug(fmt.Sprintf("Using MCP client flag: %q", client)) if err := mcp.SetupClient(client); err != nil { return err } } else { - term.Debugf("Using MCP client picker: %q", client) + slog.Debug(fmt.Sprintf("Using MCP client picker: %q", client)) clients, err := mcp.SelectMCPclients() if err != nil { return err } for _, client := range clients { - term.Debugf("Selected MCP client using picker: %q", client) + slog.Debug(fmt.Sprintf("Selected MCP client using picker: %q", client)) if err := mcp.SetupClient(client); err != nil { return err diff --git a/src/cmd/cli/command/session.go b/src/cmd/cli/command/session.go index 9766085b6..308a2c64d 100644 --- a/src/cmd/cli/command/session.go +++ b/src/cmd/cli/command/session.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "os" "path/filepath" "strings" @@ -41,7 +42,7 @@ func 
newCommandSessionWithOpts(cmd *cobra.Command, opts commandSessionOpts) (*se if !errors.Is(err, types.ErrComposeFileNotFound) { return nil, err } - term.Debugf("Could not create stack manager: %v", err) + slog.Debug(fmt.Sprintf("Could not create stack manager: %v", err)) } sessionLoader := session.NewSessionLoader(global.Client, sm, options) session, err := sessionLoader.LoadSession(ctx) @@ -69,15 +70,15 @@ func newSessionLoaderOptionsForCommand(cmd *cobra.Command) session.SessionLoader var maybeProvider client.ProviderID if maybeProvider.Set(projectName) == nil && !cmd.Flag("provider").Changed { // using -p with a provider name instead of -P - term.Warnf("Project name %q looks like a provider name; did you mean to use -P=%s instead of -p?", projectName, projectName) + slog.Warn(fmt.Sprintf("Project name %q looks like a provider name; did you mean to use -P=%s instead of -p?", projectName, projectName)) doubleCheckProjectName(projectName) } else if strings.HasPrefix(projectName, "roject-name") { // -project-name= instead of --project-name - term.Warn("Did you mean to use --project-name instead of -project-name?") + slog.Warn("Did you mean to use --project-name instead of -project-name?") doubleCheckProjectName(projectName) } else if strings.HasPrefix(projectName, "rovider") { // -provider= instead of --provider - term.Warn("Did you mean to use --provider instead of -provider?") + slog.Warn("Did you mean to use --provider instead of -provider?") doubleCheckProjectName(projectName) } } @@ -113,7 +114,7 @@ func newStackManagerForLoader(ctx context.Context, loader *compose.Loader) (sess if !errors.Is(err, types.ErrComposeFileNotFound) { return nil, handleInvalidComposeFileErr(ctx, err) } - term.Debugf("Could not determine project working directory: %v", err) + slog.Debug(fmt.Sprintf("Could not determine project working directory: %v", err)) // No project directory; look for .defang directory in current or parent directories targetDirectory, _ = findTargetDirectory(".") 
} else { @@ -124,7 +125,7 @@ func newStackManagerForLoader(ctx context.Context, loader *compose.Loader) (sess } projectName, _, err := loader.LoadProjectName(ctx) if err != nil { - term.Debugf("Could not determine project name: %v", err) + slog.Debug(fmt.Sprintf("Could not determine project name: %v", err)) } sm, err := stacks.NewManager(global.Client, targetDirectory, projectName, ec) if err != nil { @@ -166,7 +167,7 @@ func handleInvalidComposeFileErr(ctx context.Context, loadErr error) error { return loadErr } - term.Error("Cannot load project:", loadErr) + slog.Error(fmt.Sprintln("Cannot load project:", loadErr)) project, err := compose.NewLoader().CreateProjectForDebug() if err != nil { return fmt.Errorf("%w; original error: %w", err, loadErr) diff --git a/src/cmd/cli/command/stack.go b/src/cmd/cli/command/stack.go index 1c94d1811..5db887ec1 100644 --- a/src/cmd/cli/command/stack.go +++ b/src/cmd/cli/command/stack.go @@ -3,6 +3,7 @@ package command import ( "context" "fmt" + "log/slog" "connectrpc.com/connect" "github.com/DefangLabs/defang/src/pkg/cli" @@ -92,7 +93,7 @@ func makeStackNewCmd() *cobra.Command { return fmt.Errorf("stack with name %q already exists in project %q", params.Name, projectName) } - term.Debugf("Creating stack with parameters: %+v\n", params) + slog.Debug(fmt.Sprintf("Creating stack with parameters: %+v\n", params)) _, err = stacks.CreateInDirectory(".", params) if err != nil { @@ -135,8 +136,8 @@ func makeStackListCmd() *cobra.Command { } if len(stacks) == 0 { - _, err = term.Infof("No Defang stacks found in the current directory.\n") - return err + slog.Info("No Defang stacks found in the current directory.") + return nil } columns := []string{"Name", "Default", "Provider", "Region", "Account", "Mode", "DeployedAt"} @@ -173,7 +174,7 @@ func makeStackDefaultCmd() *cobra.Command { return err } - term.Info(fmt.Sprintf("Stack %q is now the default stack for project %q\n", name, projectName)) + slog.Info(fmt.Sprintf("Stack %q is now the 
default stack for project %q\n", name, projectName)) return nil }, } diff --git a/src/cmd/cli/command/stack_test.go b/src/cmd/cli/command/stack_test.go index 12e5a17b1..faf21325c 100644 --- a/src/cmd/cli/command/stack_test.go +++ b/src/cmd/cli/command/stack_test.go @@ -3,10 +3,12 @@ package command import ( "bytes" "context" + "log/slog" "os" "testing" "github.com/DefangLabs/defang/src/pkg/cli/client" + "github.com/DefangLabs/defang/src/pkg/logs" "github.com/DefangLabs/defang/src/pkg/modes" "github.com/DefangLabs/defang/src/pkg/stacks" "github.com/DefangLabs/defang/src/pkg/term" @@ -50,6 +52,7 @@ func MockTerm(t *testing.T, stdout *bytes.Buffer, stdin *bytes.Reader) { &FakeStdout{stdout}, new(bytes.Buffer), ) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) t.Cleanup(func() { term.DefaultTerm = oldTerm }) diff --git a/src/cmd/cli/command/whoami.go b/src/cmd/cli/command/whoami.go index 96353fb31..071f56d47 100644 --- a/src/cmd/cli/command/whoami.go +++ b/src/cmd/cli/command/whoami.go @@ -1,6 +1,9 @@ package command import ( + "fmt" + "log/slog" + "github.com/DefangLabs/defang/src/pkg/auth" "github.com/DefangLabs/defang/src/pkg/cli" "github.com/DefangLabs/defang/src/pkg/cli/client" @@ -24,7 +27,7 @@ var whoamiCmd = &cobra.Command{ CheckAccountInfo: false, // because we do it inside cli.Whoami }) if err != nil { - term.Warnf("Provider account information not available: %v", err) + slog.Warn(fmt.Sprintf("Provider account information not available: %v", err)) } else { provider = session.Provider } @@ -37,7 +40,7 @@ var whoamiCmd = &cobra.Command{ userInfo, err = auth.FetchUserInfo(ctx, token) if err != nil { // Either the auth service is down, or we're using a Fabric JWT: skip workspace information - term.Warn("Workspace information unavailable:", err) + slog.Warn(fmt.Sprintln("Workspace information unavailable:", err)) } } diff --git a/src/cmd/cli/command/workspace.go b/src/cmd/cli/command/workspace.go index b88182843..201f7b90d 100644 --- 
a/src/cmd/cli/command/workspace.go +++ b/src/cmd/cli/command/workspace.go @@ -2,6 +2,7 @@ package command import ( "errors" + "log/slog" "github.com/DefangLabs/defang/src/pkg" "github.com/DefangLabs/defang/src/pkg/auth" @@ -28,7 +29,7 @@ func ListWorkspaces(cmd *cobra.Command, args []string) error { rows := cli.WorkspaceRows(info, currentWorkspace) if len(rows) == 0 { - term.Info("No workspaces found for this account.") + slog.Info("No workspaces found for this account.") return nil } diff --git a/src/pkg/agent/common/common.go b/src/pkg/agent/common/common.go index 71ff060ac..175251cf3 100644 --- a/src/pkg/agent/common/common.go +++ b/src/pkg/agent/common/common.go @@ -3,10 +3,10 @@ package common import ( "errors" "fmt" + "log/slog" "os" "github.com/DefangLabs/defang/src/pkg/cli/compose" - "github.com/DefangLabs/defang/src/pkg/term" ) var MCPDevelopmentClient = "" // set by NewDefangMCPServer @@ -42,23 +42,23 @@ func ConfigureAgentLoader(params LoaderParams) (*compose.Loader, error) { projectName := params.ProjectName if projectName != "" { - term.Debugf("Project name provided: %s", projectName) - term.Debug("Function invoked: compose.NewLoader") + slog.Debug("Project name provided: " + projectName) + slog.Debug("Function invoked: compose.NewLoader") return compose.NewLoader(compose.WithProjectName(projectName)), nil } composeFilePaths := params.ComposeFilePaths if len(composeFilePaths) > 0 { - term.Debugf("Compose file paths provided: %s", composeFilePaths) - term.Debug("Function invoked: compose.NewLoader") + slog.Debug(fmt.Sprintf("Compose file paths provided: %s", composeFilePaths)) + slog.Debug("Function invoked: compose.NewLoader") return compose.NewLoader(compose.WithPath(composeFilePaths...)), nil } //TODO: Talk about using both project name and compose file paths // if projectNameOK && composeFilePathOK { - // term.Infof("Compose file paths and project name provided: %s, %s", composeFilePaths, projectName) + // slog.Info(fmt.Sprintf("Compose file paths 
and project name provided: %s, %s", composeFilePaths, projectName)) // return compose.NewLoader(compose.WithProjectName(projectName), compose.WithPath(composeFilePaths...)), nil // } - term.Debug("Function invoked: compose.NewLoader") + slog.Debug("Function invoked: compose.NewLoader") return compose.NewLoader(), nil } diff --git a/src/pkg/agent/generator.go b/src/pkg/agent/generator.go index 860a95693..9e1418304 100644 --- a/src/pkg/agent/generator.go +++ b/src/pkg/agent/generator.go @@ -4,8 +4,9 @@ import ( "context" "encoding/json" "errors" + "fmt" + "log/slog" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/firebase/genkit/go/ai" "github.com/firebase/genkit/go/genkit" ) @@ -73,7 +74,7 @@ func (g *Generator) HandleMessage(ctx context.Context, prompt string, maxTurns i if errors.Is(err, context.Canceled) { return err } - term.Debugf("error: %v", err) + slog.Debug(fmt.Sprintf("error: %v", err)) continue } diff --git a/src/pkg/agent/plugins/compat_oai/generate.go b/src/pkg/agent/plugins/compat_oai/generate.go index d905d10f7..316e95889 100644 --- a/src/pkg/agent/plugins/compat_oai/generate.go +++ b/src/pkg/agent/plugins/compat_oai/generate.go @@ -19,9 +19,9 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "strings" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/firebase/genkit/go/ai" "github.com/openai/openai-go" "github.com/openai/openai-go/packages/param" @@ -258,7 +258,7 @@ func (g *ModelGenerator) generateStream(ctx context.Context, handleChunk func(co if err != nil { return nil, fmt.Errorf("failed to marshal request params for debug: %w", err) } - _, _ = term.Debugf("Chat.Completions.NewStreaming: %s", string(reqParams)) + slog.Debug("Chat.Completions.NewStreaming: " + string(reqParams)) stream := g.client.Chat.Completions.NewStreaming(ctx, *g.request) defer stream.Close() diff --git a/src/pkg/agent/toolmanager.go b/src/pkg/agent/toolmanager.go index 4242b56d5..eadca843f 100644 --- a/src/pkg/agent/toolmanager.go +++ 
b/src/pkg/agent/toolmanager.go @@ -5,9 +5,9 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "github.com/DefangLabs/defang/src/pkg/agent/common" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/firebase/genkit/go/ai" "github.com/firebase/genkit/go/genkit" ) @@ -124,7 +124,7 @@ func (t *ToolManager) EqualPrevious(toolRequests []*ai.ToolRequest) bool { for _, req := range toolRequests { inputs, err := json.Marshal(req.Input) if err != nil { - term.Debugf("error marshaling tool request input: %v", err) + slog.Debug(fmt.Sprintf("error marshaling tool request input: %v", err)) continue } currJSON := fmt.Sprintf("%s:%s", req.Name, inputs) diff --git a/src/pkg/agent/tools/deploy.go b/src/pkg/agent/tools/deploy.go index 4a0bbf475..6b6176413 100644 --- a/src/pkg/agent/tools/deploy.go +++ b/src/pkg/agent/tools/deploy.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "strings" "github.com/DefangLabs/defang/src/pkg/agent/common" @@ -14,7 +15,6 @@ import ( "github.com/DefangLabs/defang/src/pkg/elicitations" "github.com/DefangLabs/defang/src/pkg/modes" "github.com/DefangLabs/defang/src/pkg/stacks" - "github.com/DefangLabs/defang/src/pkg/term" ) type DeployParams struct { @@ -22,7 +22,7 @@ type DeployParams struct { } func HandleDeployTool(ctx context.Context, loader client.Loader, params DeployParams, cli CLIInterface, ec elicitations.Controller, sc StackConfig) (string, error) { - term.Debug("Function invoked: loader.LoadProject") + slog.Debug("Function invoked: loader.LoadProject") project, err := cli.LoadProject(ctx, loader) if err != nil { err = fmt.Errorf("failed to parse compose file: %w", err) @@ -30,7 +30,7 @@ func HandleDeployTool(ctx context.Context, loader client.Loader, params DeployPa return "", fmt.Errorf("local deployment failed: %v. 
Please provide a valid compose file path.", err) } - term.Debug("Function invoked: cli.Connect") + slog.Debug("Function invoked: cli.Connect") client, err := GetClientWithRetry(ctx, cli, sc) if err != nil { var noBrowserErr auth.ErrNoBrowser @@ -57,9 +57,9 @@ func HandleDeployTool(ctx context.Context, loader client.Loader, params DeployPa } // Deploy the services - term.Debugf("Deploying services for project %s...", project.Name) + slog.Debug(fmt.Sprintf("Deploying services for project %s...", project.Name)) - term.Debug("Function invoked: cli.ComposeUp") + slog.Debug("Function invoked: cli.ComposeUp") // Use ComposeUp to deploy the services deployResp, project, err := cli.ComposeUp(ctx, client, provider, sc.Stack, cliTypes.ComposeUpParams{ Project: project, diff --git a/src/pkg/agent/tools/destroy.go b/src/pkg/agent/tools/destroy.go index b8944b0e9..3e848452d 100644 --- a/src/pkg/agent/tools/destroy.go +++ b/src/pkg/agent/tools/destroy.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "connectrpc.com/connect" "github.com/DefangLabs/defang/src/pkg/agent/common" @@ -11,7 +12,6 @@ import ( "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/elicitations" "github.com/DefangLabs/defang/src/pkg/stacks" - "github.com/DefangLabs/defang/src/pkg/term" ) type DestroyParams struct { @@ -19,7 +19,7 @@ type DestroyParams struct { } func HandleDestroyTool(ctx context.Context, loader client.Loader, params DestroyParams, cli CLIInterface, ec elicitations.Controller, sc StackConfig) (string, error) { - term.Debug("Function invoked: cli.Connect") + slog.Debug("Function invoked: cli.Connect") client, err := GetClientWithRetry(ctx, cli, sc) if err != nil { var noBrowserErr auth.ErrNoBrowser @@ -39,7 +39,7 @@ func HandleDestroyTool(ctx context.Context, loader client.Loader, params Destroy if err != nil { return "", fmt.Errorf("failed to setup provider: %w", err) } - term.Debug("Function invoked: cli.LoadProjectNameWithFallback") + 
slog.Debug("Function invoked: cli.LoadProjectNameWithFallback") projectName, err := cli.LoadProjectNameWithFallback(ctx, loader, provider) if err != nil { return "", fmt.Errorf("failed to load project name: %w", err) @@ -50,7 +50,7 @@ func HandleDestroyTool(ctx context.Context, loader client.Loader, params Destroy return "", fmt.Errorf("failed to use provider: %w", err) } - term.Debug("Function invoked: cli.ComposeDown") + slog.Debug("Function invoked: cli.ComposeDown") deployment, err := cli.ComposeDown(ctx, projectName, client, provider) if err != nil { if connect.CodeOf(err) == connect.CodeNotFound { diff --git a/src/pkg/agent/tools/estimate.go b/src/pkg/agent/tools/estimate.go index 9c041e7cb..ba5d48174 100644 --- a/src/pkg/agent/tools/estimate.go +++ b/src/pkg/agent/tools/estimate.go @@ -4,12 +4,12 @@ import ( "context" "errors" "fmt" + "log/slog" "github.com/DefangLabs/defang/src/pkg/agent/common" "github.com/DefangLabs/defang/src/pkg/auth" "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/modes" - "github.com/DefangLabs/defang/src/pkg/term" ) type EstimateParams struct { @@ -20,14 +20,14 @@ type EstimateParams struct { } func HandleEstimateTool(ctx context.Context, loader client.Loader, params EstimateParams, cli CLIInterface, sc StackConfig) (string, error) { - term.Debug("Function invoked: loader.LoadProject") + slog.Debug("Function invoked: loader.LoadProject") project, err := cli.LoadProject(ctx, loader) if err != nil { err = fmt.Errorf("failed to parse compose file: %w", err) return "", fmt.Errorf("failed to parse compose file: %w", err) } - term.Debug("Function invoked: cli.Connect") + slog.Debug("Function invoked: cli.Connect") fabric, err := GetClientWithRetry(ctx, cli, sc) if err != nil { var noBrowserErr auth.ErrNoBrowser @@ -51,12 +51,12 @@ func HandleEstimateTool(ctx context.Context, loader client.Loader, params Estima return "", err } - term.Debug("Function invoked: cli.RunEstimate") + slog.Debug("Function 
invoked: cli.RunEstimate") estimate, err := cli.RunEstimate(ctx, project, fabric, defangProvider, providerID, params.Region, deploymentMode) if err != nil { return "", fmt.Errorf("failed to run estimate: %w", err) } - term.Debugf("Estimate: %+v", estimate) + slog.Debug(fmt.Sprintf("Estimate: %+v", estimate)) estimateText := cli.PrintEstimate(deploymentMode, estimate) diff --git a/src/pkg/agent/tools/listConfig.go b/src/pkg/agent/tools/listConfig.go index 21a426f70..b34ababef 100644 --- a/src/pkg/agent/tools/listConfig.go +++ b/src/pkg/agent/tools/listConfig.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "strings" "github.com/DefangLabs/defang/src/pkg/agent/common" @@ -11,7 +12,6 @@ import ( "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/elicitations" "github.com/DefangLabs/defang/src/pkg/stacks" - "github.com/DefangLabs/defang/src/pkg/term" ) type ListConfigParams struct { @@ -20,7 +20,7 @@ type ListConfigParams struct { // HandleListConfigTool handles the list config tool logic func HandleListConfigTool(ctx context.Context, loader client.Loader, params ListConfigParams, cli CLIInterface, ec elicitations.Controller, sc StackConfig) (string, error) { - term.Debug("Function invoked: cli.Connect") + slog.Debug("Function invoked: cli.Connect") client, err := GetClientWithRetry(ctx, cli, sc) if err != nil { var noBrowserErr auth.ErrNoBrowser @@ -41,14 +41,14 @@ func HandleListConfigTool(ctx context.Context, loader client.Loader, params List return "", fmt.Errorf("failed to setup provider: %w", err) } - term.Debug("Function invoked: cli.LoadProjectNameWithFallback") + slog.Debug("Function invoked: cli.LoadProjectNameWithFallback") projectName, err := cli.LoadProjectNameWithFallback(ctx, loader, provider) if err != nil { return "", fmt.Errorf("failed to load project name: %w", err) } - term.Debug("Project name loaded:", projectName) + slog.Debug(fmt.Sprintln("Project name loaded:", projectName)) - 
term.Debug("Function invoked: cli.ConfigList") + slog.Debug("Function invoked: cli.ConfigList") config, err := cli.ListConfig(ctx, provider, projectName) if err != nil { return "", fmt.Errorf("failed to list config variables: %w", err) diff --git a/src/pkg/agent/tools/logs.go b/src/pkg/agent/tools/logs.go index 75724970d..fa9da6c9f 100644 --- a/src/pkg/agent/tools/logs.go +++ b/src/pkg/agent/tools/logs.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "time" "github.com/DefangLabs/defang/src/pkg/agent/common" @@ -13,7 +14,6 @@ import ( "github.com/DefangLabs/defang/src/pkg/elicitations" "github.com/DefangLabs/defang/src/pkg/logs" "github.com/DefangLabs/defang/src/pkg/stacks" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/timeutils" ) @@ -41,7 +41,7 @@ func HandleLogsTool(ctx context.Context, loader client.Loader, params LogsParams } } - term.Debug("Function invoked: cli.Connect") + slog.Debug("Function invoked: cli.Connect") client, err := GetClientWithRetry(ctx, cli, sc) if err != nil { var noBrowserErr auth.ErrNoBrowser @@ -62,12 +62,12 @@ func HandleLogsTool(ctx context.Context, loader client.Loader, params LogsParams return "", fmt.Errorf("failed to setup provider: %w", err) } - term.Debug("Function invoked: cli.LoadProjectNameWithFallback") + slog.Debug("Function invoked: cli.LoadProjectNameWithFallback") projectName, err := cli.LoadProjectNameWithFallback(ctx, loader, provider) if err != nil { return "", fmt.Errorf("failed to load project name: %w", err) } - term.Debug("Project name loaded:", projectName) + slog.Debug(fmt.Sprintln("Project name loaded:", projectName)) err = cli.CanIUseProvider(ctx, client, provider, projectName, 0) if err != nil { @@ -86,7 +86,7 @@ func HandleLogsTool(ctx context.Context, loader client.Loader, params LogsParams }) if err != nil { - term.Error("Failed to fetch logs", "error", err) + slog.Error(fmt.Sprintln("Failed to fetch logs", "error", err)) return "", fmt.Errorf("failed 
to fetch logs: %w", err) } diff --git a/src/pkg/agent/tools/provider.go b/src/pkg/agent/tools/provider.go index 4f1c670e5..d602d4719 100644 --- a/src/pkg/agent/tools/provider.go +++ b/src/pkg/agent/tools/provider.go @@ -3,11 +3,11 @@ package tools import ( "context" "fmt" + "log/slog" "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/elicitations" "github.com/DefangLabs/defang/src/pkg/stacks" - "github.com/DefangLabs/defang/src/pkg/term" ) const CreateNewStack = "Create new stack" @@ -48,7 +48,7 @@ func (pp *providerPreparer) SetupProvider(ctx context.Context, stack *stacks.Par } } - term.Debug("Function invoked: cli.NewProvider") + slog.Debug("Function invoked: cli.NewProvider") provider := pp.pc.NewProvider(ctx, stack.Provider, pp.fc, stack.Name) providerID := stack.Provider return &providerID, provider, nil diff --git a/src/pkg/agent/tools/removeConfig.go b/src/pkg/agent/tools/removeConfig.go index e16751a65..956bc817a 100644 --- a/src/pkg/agent/tools/removeConfig.go +++ b/src/pkg/agent/tools/removeConfig.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "connectrpc.com/connect" "github.com/DefangLabs/defang/src/pkg/agent/common" @@ -11,7 +12,6 @@ import ( "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/elicitations" "github.com/DefangLabs/defang/src/pkg/stacks" - "github.com/DefangLabs/defang/src/pkg/term" ) type RemoveConfigParams struct { @@ -21,7 +21,7 @@ type RemoveConfigParams struct { // HandleRemoveConfigTool handles the remove config tool logic func HandleRemoveConfigTool(ctx context.Context, loader client.Loader, params RemoveConfigParams, cli CLIInterface, ec elicitations.Controller, sc StackConfig) (string, error) { - term.Debug("Function invoked: cli.Connect") + slog.Debug("Function invoked: cli.Connect") client, err := GetClientWithRetry(ctx, cli, sc) if err != nil { var noBrowserErr auth.ErrNoBrowser @@ -41,7 +41,7 @@ func HandleRemoveConfigTool(ctx 
context.Context, loader client.Loader, params Re if err != nil { return "", fmt.Errorf("failed to setup provider: %w", err) } - term.Debug("Function invoked: cli.LoadProjectNameWithFallback") + slog.Debug("Function invoked: cli.LoadProjectNameWithFallback") projectName, err := cli.LoadProjectNameWithFallback(ctx, loader, provider) if err != nil { return "", fmt.Errorf("failed to load project name: %w", err) diff --git a/src/pkg/agent/tools/services.go b/src/pkg/agent/tools/services.go index ac9687d61..335104e20 100644 --- a/src/pkg/agent/tools/services.go +++ b/src/pkg/agent/tools/services.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "strings" "connectrpc.com/connect" @@ -14,7 +15,6 @@ import ( "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/elicitations" "github.com/DefangLabs/defang/src/pkg/stacks" - "github.com/DefangLabs/defang/src/pkg/term" ) type ServicesParams struct { @@ -22,7 +22,7 @@ type ServicesParams struct { } func HandleServicesTool(ctx context.Context, loader client.Loader, params ServicesParams, cli CLIInterface, ec elicitations.Controller, sc StackConfig) (string, error) { - term.Debug("Function invoked: cli.Connect") + slog.Debug("Function invoked: cli.Connect") client, err := GetClientWithRetry(ctx, cli, sc) if err != nil { var noBrowserErr auth.ErrNoBrowser @@ -42,9 +42,9 @@ func HandleServicesTool(ctx context.Context, loader client.Loader, params Servic if err != nil { return "", fmt.Errorf("failed to setup provider: %w", err) } - term.Debug("Function invoked: cli.LoadProjectNameWithFallback") + slog.Debug("Function invoked: cli.LoadProjectNameWithFallback") projectName, err := cli.LoadProjectNameWithFallback(ctx, loader, provider) - term.Debugf("Project name loaded: %s", projectName) + slog.Debug("Project name loaded: " + projectName) if err != nil { if strings.Contains(err.Error(), "no projects found") { return "no projects found on Playground", nil @@ -68,7 +68,7 @@ func 
HandleServicesTool(ctx context.Context, loader client.Loader, params Servic // Convert to JSON jsonData, jsonErr := json.Marshal(serviceResponse) if jsonErr == nil { - term.Debugf("Successfully loaded services with count: %d", len(serviceResponse)) + slog.Debug(fmt.Sprintf("Successfully loaded services with count: %d", len(serviceResponse))) return string(jsonData) + "\nIf you would like to see more details about your deployed projects, please visit the Defang portal at https://portal.defang.io/projects", nil } diff --git a/src/pkg/agent/tools/setConfig.go b/src/pkg/agent/tools/setConfig.go index 2a26fed0a..250625ed6 100644 --- a/src/pkg/agent/tools/setConfig.go +++ b/src/pkg/agent/tools/setConfig.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "github.com/DefangLabs/defang/src/pkg" "github.com/DefangLabs/defang/src/pkg/agent/common" @@ -12,7 +13,6 @@ import ( "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/elicitations" "github.com/DefangLabs/defang/src/pkg/stacks" - "github.com/DefangLabs/defang/src/pkg/term" ) type SetConfigParams struct { @@ -23,7 +23,7 @@ type SetConfigParams struct { } func HandleSetConfig(ctx context.Context, loader client.Loader, params SetConfigParams, cliInterface CLIInterface, ec elicitations.Controller, sc StackConfig) (string, error) { - term.Debug("Function invoked: cli.Connect") + slog.Debug("Function invoked: cli.Connect") client, err := GetClientWithRetry(ctx, cliInterface, sc) if err != nil { var noBrowserErr auth.ErrNoBrowser @@ -45,7 +45,7 @@ func HandleSetConfig(ctx context.Context, loader client.Loader, params SetConfig } if params.ProjectName == "" { - term.Debug("Function invoked: cli.LoadProjectNameWithFallback") + slog.Debug("Function invoked: cli.LoadProjectNameWithFallback") projectName, err := cliInterface.LoadProjectNameWithFallback(ctx, loader, provider) if err != nil { return "", fmt.Errorf("failed to load project name: %w", err) @@ -63,10 +63,10 @@ func 
HandleSetConfig(ctx context.Context, loader client.Loader, params SetConfig return "", errors.New("Both 'random' and 'value' parameters provided; please provide only one") } value = cli.CreateRandomConfigValue() - term.Debug("Generated random value for config") + slog.Debug("Generated random value for config") } - term.Debug("Function invoked: cli.ConfigSet") + slog.Debug("Function invoked: cli.ConfigSet") if err := cliInterface.ConfigSet(ctx, params.ProjectName, provider, params.Name, value); err != nil { return "", fmt.Errorf("failed to set config: %w", err) } diff --git a/src/pkg/auth/auth.go b/src/pkg/auth/auth.go index b51d6bccc..da13893b2 100644 --- a/src/pkg/auth/auth.go +++ b/src/pkg/auth/auth.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "net/url" "path" "strings" @@ -72,13 +73,13 @@ func StartAuthCodeFlow(ctx context.Context, mcpFlow LoginFlow, saveToken func(st ctx := context.Background() code, err := pollForAuthCode(ctx, ar.state) if err != nil { - term.Errorf("failed to poll for auth code: %v", err) + slog.Error(fmt.Sprintf("failed to poll for auth code: %v", err)) return } token, err := ExchangeCodeForToken(ctx, AuthCodeFlow{code: code, redirectUri: redirectUri, verifier: ar.verifier}) if err != nil { - term.Errorf("failed to exchange code for token: %v", err) + slog.Error(fmt.Sprintf("failed to exchange code for token: %v", err)) return } @@ -111,12 +112,12 @@ func Poll(ctx context.Context, key string) ([]byte, error) { result, err := OpenAuthClient.Poll(ctx, key) if err != nil { if errors.Is(err, ErrPollTimeout) { - term.Debug("poll timed out, retrying...") + slog.Debug("poll timed out, retrying...") continue } var unexpectedError ErrUnexpectedStatus if errors.As(err, &unexpectedError) && unexpectedError.StatusCode >= 500 { - term.Debugf("received server error: %s, retrying in %v...", unexpectedError.Status, retryDelay) + slog.Debug(fmt.Sprintf("received server error: %s, retrying in %v...", unexpectedError.Status, retryDelay)) 
select { case <-ctx.Done(): return nil, ctx.Err() @@ -161,7 +162,7 @@ func ExchangeCodeForToken(ctx context.Context, code AuthCodeFlow, ss ...scope.Sc scopes = append(scopes, s.String()) } - term.Debugf("Generating access token with scopes %v", scopes) + slog.Debug(fmt.Sprintf("Generating access token with scopes %v", scopes)) token, err := OpenAuthClient.Exchange(code.code, code.redirectUri, code.verifier) // TODO: scope if err != nil { diff --git a/src/pkg/cli/cd.go b/src/pkg/cli/cd.go index 33f8601de..5d7bec497 100644 --- a/src/pkg/cli/cd.go +++ b/src/pkg/cli/cd.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "os" "slices" "strings" @@ -21,9 +22,9 @@ import ( func CdCommand(ctx context.Context, projectName string, provider client.Provider, fabric client.FabricClient, command client.CdCommand) (types.ETag, error) { if projectName == "" { // projectName is empty for "list --remote" - term.Infof("Running CD command %q", command) + slog.Info(fmt.Sprintf("Running CD command %q", command)) } else { - term.Infof("Running CD command %q in project %q", command, projectName) + slog.Info(fmt.Sprintf("Running CD command %q in project %q", command, projectName)) } if dryrun.DoDryRun { return "", dryrun.ErrDryRun @@ -48,7 +49,7 @@ func CdCommand(ctx context.Context, projectName string, provider client.Provider case client.CdCommandDown, client.CdCommandDestroy: err := deleteSubdomain(ctx, projectName, provider, fabric) if err != nil { - term.Warn("Unable to update deployment history; deployment will proceed anyway.") + slog.Warn("Unable to update deployment history; deployment will proceed anyway.") break } // Update deployment table to mark deployment as destroyed only after successful deletion of the subdomain @@ -65,8 +66,8 @@ func CdCommand(ctx context.Context, projectName string, provider client.Provider StatesUrl: statesUrl, }) if err != nil { - term.Debug("Failed to record deployment:", err) - term.Warn("Unable to update deployment history; deployment 
will proceed anyway.") + slog.Debug(fmt.Sprintln("Failed to record deployment:", err)) + slog.Warn("Unable to update deployment history; deployment will proceed anyway.") } } return cd.ETag, nil @@ -80,9 +81,9 @@ func deleteSubdomain(ctx context.Context, projectName string, provider client.Pr }) if err != nil { // This can fail when the project was deployed from a different workspace than the current one - term.Debug("DeleteSubdomainZone failed:", err) + slog.Debug(fmt.Sprintln("DeleteSubdomainZone failed:", err)) if connect.CodeOf(err) == connect.CodeNotFound { - term.Warn("Subdomain not found; did you mean to destroy a different project or stack?") + slog.Warn("Subdomain not found; did you mean to destroy a different project or stack?") } return err } @@ -121,7 +122,7 @@ func TailAndWaitForCD(ctx context.Context, provider client.Provider, projectName // blocking call to tail var tailErr error if err := streamLogs(ctx, provider, projectName, tailOptions, logEntryPrintHandler); err != nil { - term.Debug("Tail stopped with", err, errors.Unwrap(err)) + slog.Debug(fmt.Sprintln("Tail stopped with", err, errors.Unwrap(err))) if !errors.Is(err, context.Canceled) { tailErr = err } @@ -136,7 +137,7 @@ func SplitProjectStack(name string) (projectName string, stackName string) { } func CdListFromStorage(ctx context.Context, provider client.Provider, allRegions bool) error { - term.Debug("Running CD list") + slog.Debug("Running CD list") if dryrun.DoDryRun { return dryrun.ErrDryRun } diff --git a/src/pkg/cli/cert.go b/src/pkg/cli/cert.go index 954958663..76f1ddeb8 100644 --- a/src/pkg/cli/cert.go +++ b/src/pkg/cli/cert.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "io" + "log/slog" "net" "net/http" "strings" @@ -69,7 +70,7 @@ var ( ExpectContinueTimeout: 1 * time.Second, }, CheckRedirect: func(req *http.Request, via []*http.Request) error { - term.Debugf("Redirecting from %v to %v", via[len(via)-1].URL, req.URL) + slog.Debug(fmt.Sprintf("Redirecting from %v to %v", 
via[len(via)-1].URL, req.URL)) return nil }, } @@ -77,7 +78,7 @@ var ( ) func GenerateLetsEncryptCert(ctx context.Context, project *compose.Project, client client.FabricClient, provider client.Provider) error { - term.Debugf("Generating TLS cert for project %q", project.Name) + slog.Debug(fmt.Sprintf("Generating TLS cert for project %q", project.Name)) services, err := provider.GetServices(ctx, &defangv1.GetServicesRequest{Project: project.Name}) if err != nil { @@ -95,7 +96,7 @@ func GenerateLetsEncryptCert(ctx context.Context, project *compose.Project, clie } if service, ok := project.Services[serviceInfo.Service.Name]; ok { if service.DomainName != serviceInfo.Domainname { - term.Warnf("service %q: domainname %q in compose file does not match deployed value %q", service.Name, service.DomainName, serviceInfo.Domainname) + slog.Warn(fmt.Sprintf("service %q: domainname %q in compose file does not match deployed value %q", service.Name, service.DomainName, serviceInfo.Domainname)) } cnt++ targets := getDomainTargets(serviceInfo, service) @@ -103,14 +104,14 @@ func GenerateLetsEncryptCert(ctx context.Context, project *compose.Project, clie if defaultNetwork := service.Networks["default"]; defaultNetwork != nil { domains = append(domains, defaultNetwork.Aliases...) 
} - term.Debugf("Found service %v with domains %v and targets %v", service.Name, domains, targets) + slog.Debug(fmt.Sprintf("Found service %v with domains %v and targets %v", service.Name, domains, targets)) for _, domain := range domains { generateCert(ctx, domain, targets, client) } } } if cnt == 0 { - term.Infof("No `domainname` found in compose file; no HTTPS cert generation needed") + slog.Info("No `domainname` found in compose file; no HTTPS cert generation needed") } return nil @@ -132,35 +133,35 @@ func getDomainTargets(serviceInfo *defangv1.ServiceInfo, service compose.Service } func generateCert(ctx context.Context, domain string, targets []string, client client.FabricClient) { - term.Infof("Checking DNS setup for %v", domain) + slog.Info(fmt.Sprintf("Checking DNS setup for %v", domain)) if err := waitForCNAME(ctx, domain, targets, client); err != nil { - term.Errorf("Error waiting for CNAME: %v", err) + slog.Error(fmt.Sprintf("Error waiting for CNAME: %v", err)) return } - term.Infof("%v DNS is properly configured!", domain) + slog.Info(fmt.Sprintf("%v DNS is properly configured!", domain)) if err := cert.CheckTLSCert(ctx, domain); err == nil { - term.Infof("TLS cert for %v is already ready", domain) + slog.Info(fmt.Sprintf("TLS cert for %v is already ready", domain)) return } if err := pkg.SleepWithContext(ctx, 5*time.Second); err != nil { // slight delay to ensure DNS to propagate - term.Errorf("Error waiting for DNS propagation: %v", err) + slog.Error(fmt.Sprintf("Error waiting for DNS propagation: %v", err)) return } - term.Infof("Triggering cert generation for %v", domain) + slog.Info(fmt.Sprintf("Triggering cert generation for %v", domain)) if err := triggerCertGeneration(ctx, domain); err != nil { - term.Errorf("Error triggering cert generation, please try again") + slog.Error("Error triggering cert generation, please try again") return } - term.Infof("Waiting for TLS cert to be online for %v, this could take a few minutes", domain) + 
slog.Info(fmt.Sprintf("Waiting for TLS cert to be online for %v, this could take a few minutes", domain)) if err := waitForTLS(ctx, domain); err != nil { - term.Errorf("Error waiting for TLS to be online: %v", err) + slog.Error(fmt.Sprintf("Error waiting for TLS to be online: %v", err)) // FIXME: Add more info on how to debug, possibly provided by the server side to avoid client type detection here return } - term.Infof("TLS cert for %v is ready\n", domain) + slog.Info(fmt.Sprintf("TLS cert for %v is ready\n", domain)) } func triggerCertGeneration(ctx context.Context, domain string) error { @@ -176,7 +177,7 @@ func triggerCertGeneration(ctx context.Context, domain string) error { // Our own retry logic uses the root resolver to prevent cached DNS and retry on all non-200 errors if err := getWithRetries(ctx, fmt.Sprintf("http://%v", domain), 5); err != nil { // Retry incase of DNS error // Ignore possible tls error as cert attachment may take time - term.Debugf("Error triggering cert generation: %v", err) + slog.Debug(fmt.Sprintf("Error triggering cert generation: %v", err)) return err } return nil @@ -205,7 +206,7 @@ func waitForTLS(ctx context.Context, domain string) error { if err := cert.CheckTLSCert(timeout, domain); err == nil { return nil } else { - term.Debugf("Error checking TLS cert for %v: %v", domain, err) + slog.Debug(fmt.Sprintf("Error checking TLS cert for %v: %v", domain, err)) } } } @@ -234,24 +235,24 @@ func waitForCNAME(ctx context.Context, domain string, targets []string, client c verifyDNS := func() error { if !serverSideVerified && serverVerifyRpcFailure < 3 { if err := client.VerifyDNSSetup(ctx, &defangv1.VerifyDNSSetupRequest{Domain: domain, Targets: targets}); err == nil { - term.Debugf("Server side DNS verification for %v successful", domain) + slog.Debug(fmt.Sprintf("Server side DNS verification for %v successful", domain)) serverSideVerified = true } else { if cerr := new(connect.Error); errors.As(err, &cerr) && cerr.Code() == 
connect.CodeFailedPrecondition { - term.Debugf("Server side DNS verification negative result: %v", cerr.Message()) + slog.Debug(fmt.Sprintf("Server side DNS verification negative result: %v", cerr.Message())) } else { - term.Debugf("Server side DNS verification request for %v failed: %v", domain, err) + slog.Debug(fmt.Sprintf("Server side DNS verification request for %v failed: %v", domain, err)) serverVerifyRpcFailure++ } } if serverVerifyRpcFailure >= 3 { - term.Warnf("Server side DNS verification for %v failed multiple times, skipping server side DNS verification.", domain) + slog.Warn(fmt.Sprintf("Server side DNS verification for %v failed multiple times, skipping server side DNS verification.", domain)) } } if serverSideVerified || serverVerifyRpcFailure >= 3 { locallyVerified := dns.CheckDomainDNSReady(ctx, domain, targets) if serverSideVerified && !locallyVerified { - term.Warnf("DNS settings for %v are verified, but changes may take a few minutes to propagate due to caching.", domain) + slog.Warn(fmt.Sprintf("DNS settings for %v are verified, but changes may take a few minutes to propagate due to caching.", domain)) return nil } if locallyVerified { @@ -264,9 +265,9 @@ func waitForCNAME(ctx context.Context, domain string, targets []string, client c if err := verifyDNS(); err == nil { return nil } - term.Infof("Configure a CNAME or ALIAS record for the domain name: %v", domain) + slog.Info(fmt.Sprintf("Configure a CNAME or ALIAS record for the domain name: %v", domain)) term.Printf(" %v -> %v\n", domain, strings.Join(targets, " or ")) - term.Infof("Awaiting DNS record setup and propagation... This may take a while.") + slog.Info("Awaiting DNS record setup and propagation... 
This may take a while.") for { select { @@ -295,18 +296,18 @@ func getWithRetries(ctx context.Context, url string, tries int) error { return nil } if resp != nil && resp.Request != nil && resp.Request.URL.Scheme == "https" { - term.Debugf("cert gen request success, received redirect to %v", resp.Request.URL) + slog.Debug(fmt.Sprintf("cert gen request success, received redirect to %v", resp.Request.URL)) return nil // redirect to https indicate a successful cert generation } if err == nil { err = fmt.Errorf("HTTP: %v", resp.StatusCode) } } else if cve := new(tls.CertificateVerificationError); errors.As(err, &cve) { - term.Debugf("cert gen request success, received tls error: %v", cve) + slog.Debug(fmt.Sprintf("cert gen request success, received tls error: %v", cve)) return nil // tls error indicate a successful cert gen trigger, as it has to be redirected to https } - term.Debugf("Error fetching %v: %v, tries left %v", url, err, tries-i-1) + slog.Debug(fmt.Sprintf("Error fetching %v: %v, tries left %v", url, err, tries-i-1)) errs = append(errs, err) delay := httpRetryDelayBase << i // Simple exponential backoff diff --git a/src/pkg/cli/client/byoc/aws/alb_logs.go b/src/pkg/cli/client/byoc/aws/alb_logs.go index 3228cbdcf..9531facbb 100644 --- a/src/pkg/cli/client/byoc/aws/alb_logs.go +++ b/src/pkg/cli/client/byoc/aws/alb_logs.go @@ -8,12 +8,12 @@ import ( "fmt" "io" "iter" + "log/slog" "slices" "strings" "time" "github.com/DefangLabs/defang/src/pkg/clouds/aws/cw" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/aws/aws-sdk-go-v2/service/s3" s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" ) @@ -34,7 +34,7 @@ func (b *ByocAws) fetchAndStreamAlbLogs(ctx context.Context, projectName string, if b.Prefix != "" { bucketPrefix = b.Prefix + "-" + bucketPrefix } - term.Debug("Query ALB logs", bucketPrefix) + slog.Debug(fmt.Sprintln("Query ALB logs", bucketPrefix)) if len(bucketPrefix) > 31 { // HACK: AWS CD truncates the ALB name to 31 characters (because of 
the long Terraform suffix) bucketPrefix = bucketPrefix[:31] diff --git a/src/pkg/cli/client/byoc/aws/byoc.go b/src/pkg/cli/client/byoc/aws/byoc.go index babf9950b..d4d148718 100644 --- a/src/pkg/cli/client/byoc/aws/byoc.go +++ b/src/pkg/cli/client/byoc/aws/byoc.go @@ -9,6 +9,7 @@ import ( "fmt" "io" "iter" + "log/slog" "os" "path/filepath" "strconv" @@ -91,7 +92,7 @@ func AnnotateAwsError(err error) error { if err == nil { return nil } - term.Debug("AWS error:", err) + slog.Debug(fmt.Sprintln("AWS error:", err)) if strings.Contains(err.Error(), "missing AWS region:") { return ErrMissingAwsRegion{err} } @@ -120,11 +121,11 @@ func NewByocProvider(ctx context.Context, tenantName types.TenantLabel, stack st AWSSecretAccessKey := os.Getenv("AWS_SECRET_ACCESS_KEY") switch { case AWSAccessKeyID != "" && AWSSecretAccessKey != "": - term.Warnf("Both AWS_ACCESS_KEY_ID/AWS_SECRET_ACCESS_KEY and AWS_PROFILE (%q) are set; access keys take precedence and AWS_PROFILE will be ignored", awsProfileName) + slog.Warn(fmt.Sprintf("Both AWS_ACCESS_KEY_ID/AWS_SECRET_ACCESS_KEY and AWS_PROFILE (%q) are set; access keys take precedence and AWS_PROFILE will be ignored", awsProfileName)) case AWSAccessKeyID != "" && AWSSecretAccessKey == "": - term.Warnf("Partial credentials found in env, missing: AWS_SECRET_ACCESS_KEY; using AWS_PROFILE (%q) instead", awsProfileName) + slog.Warn(fmt.Sprintf("Partial credentials found in env, missing: AWS_SECRET_ACCESS_KEY; using AWS_PROFILE (%q) instead", awsProfileName)) case AWSAccessKeyID == "" && AWSSecretAccessKey != "": - term.Warnf("Partial credentials found in env, missing: AWS_ACCESS_KEY_ID; using AWS_PROFILE (%q) instead", awsProfileName) + slog.Warn(fmt.Sprintf("Partial credentials found in env, missing: AWS_ACCESS_KEY_ID; using AWS_PROFILE (%q) instead", awsProfileName)) } } @@ -154,7 +155,7 @@ func (b *ByocAws) SetUpCD(ctx context.Context, force bool) error { return nil } - term.Debugf("Using CD image: %q", b.CDImage) + 
slog.Debug(fmt.Sprintf("Using CD image: %q", b.CDImage)) _, err := b.driver.SetUp(ctx, force) if err != nil { @@ -266,13 +267,13 @@ func (b *ByocAws) deploy(ctx context.Context, req *client.DeployRequest, cmd str } if b.needDockerHubCreds { - term.Debugf("Docker Hub credentials are needed for image pulls") + slog.Debug("Docker Hub credentials are needed for image pulls") dockerHubUser, dockerHubPass, err := dockerhub.GetDockerHubCredentials(ctx) if err != nil { - term.Debugf("Could not retrieve Docker Hub credentials: %v", err) - term.Warnf("Docker Hub credentials are required to avoid pull throttling. Please run `docker login` or set the DOCKERHUB_USERNAME and DOCKERHUB_TOKEN environment variables. Without valid credentials, image pulls may be rate-limited or fail.") + slog.Debug(fmt.Sprintf("Could not retrieve Docker Hub credentials: %v", err)) + slog.Warn("Docker Hub credentials are required to avoid pull throttling. Please run `docker login` or set the DOCKERHUB_USERNAME and DOCKERHUB_TOKEN environment variables. 
Without valid credentials, image pulls may be rate-limited or fail.") } else { - term.Debugf("Using Docker Hub credentials with user %v", dockerHubUser) + slog.Debug(fmt.Sprintf("Using Docker Hub credentials with user %v", dockerHubUser)) cdCmd.dockerHubUsername = dockerHubUser cdCmd.dockerHubAccessToken = dockerHubPass } @@ -288,7 +289,7 @@ func (b *ByocAws) deploy(ctx context.Context, req *client.DeployRequest, cmd str for _, si := range serviceInfos { if si.UseAcmeCert { - term.Infof("To activate TLS certificate for %v, run 'defang cert gen'", si.Domainname) + slog.Info(fmt.Sprintf("To activate TLS certificate for %v, run 'defang cert gen'", si.Domainname)) } } @@ -364,7 +365,7 @@ func (b *ByocAws) checkRequiresDockerHubToken(ctx context.Context, project *comp found, err := b.driver.CheckImageExistOnPublicECR(ctx, ecrRepo, tag) if err != nil { - term.Debugf("Error checking image %q on Public ECR: %v, assuming credentials needed", image, err) + slog.Debug(fmt.Sprintf("Error checking image %q on Public ECR: %v, assuming credentials needed", image, err)) found = false } if !found { @@ -378,7 +379,7 @@ func (b *ByocAws) checkRequiresDockerHubToken(ctx context.Context, project *comp } if len(missingDockerhubImages) > 0 { b.needDockerHubCreds = true - term.Debugf("Docker Hub images not found on Public ECR: %v", missingDockerhubImages) + slog.Debug(fmt.Sprintf("Docker Hub images not found on Public ECR: %v", missingDockerhubImages)) track.Evt("NeedsDockerHubCreds", track.P("images", strings.Join(missingDockerhubImages, ","))) } return nil @@ -411,7 +412,7 @@ func (b *ByocAws) findZone(ctx context.Context, domain, roleARN string) (string, return "", err } if len(zones) > 1 { - term.Warnf("Multiple hosted zones found for domain %q, using the first one: %v", domain, zones[0].Id) + slog.Warn(fmt.Sprintf("Multiple hosted zones found for domain %q, using the first one: %v", domain, zones[0].Id)) } return *zones[0].Id, nil } @@ -551,10 +552,10 @@ func (b *ByocAws) 
runCdCommand(ctx context.Context, cmd cdCommand) (awscodebuild if cmd.dockerHubUsername != "" && cmd.dockerHubAccessToken != "" { arn, err := b.putDockerHubSecret(ctx, cmd.project, cmd.dockerHubUsername, cmd.dockerHubAccessToken) if err != nil { - term.Warnf("Could not store Docker Hub credentials in Secrets Manager, images from dockerhub may be throttled during build: %v", err) + slog.Warn(fmt.Sprintf("Could not store Docker Hub credentials in Secrets Manager, images from dockerhub may be throttled during build: %v", err)) } else { env["CI_REGISTRY_CREDENTIALS_ARN"] = arn - term.Debugf("Stored Docker Hub credentials in Secrets Manager: %s", arn) + slog.Debug("Stored Docker Hub credentials in Secrets Manager: " + arn) } } @@ -595,7 +596,7 @@ func (b *ByocAws) GetProjectUpdate(ctx context.Context, projectName string) (*de // FillOutputs might fail if the stack is not created yet; return ErrNotExist (no bucket = no services yet) var cfnErr *cfn.ErrStackNotFoundException if errors.As(err, &cfnErr) { - term.Debugf("FillOutputs: %v", err) + slog.Debug(fmt.Sprintf("FillOutputs: %v", err)) return nil, client.ErrNotExist // no bucket = no services yet } return nil, AnnotateAwsError(err) @@ -611,14 +612,14 @@ func (b *ByocAws) GetProjectUpdate(ctx context.Context, projectName string) (*de s3Client := aws.NewS3FromConfig(cfg) path := b.GetProjectUpdatePath(projectName) - term.Debug("Getting services from bucket:", bucketName, path) + slog.Debug(fmt.Sprintln("Getting services from bucket:", bucketName, path)) getObjectOutput, err := s3Client.GetObject(ctx, &s3.GetObjectInput{ Bucket: &bucketName, Key: &path, }) if err != nil { if aws.IsS3NoSuchKeyError(err) { - term.Debug("s3.GetObject:", err) + slog.Debug(fmt.Sprintln("s3.GetObject:", err)) return nil, client.ErrNotExist // no services yet } return nil, AnnotateAwsError(err) @@ -658,14 +659,14 @@ func (b *ByocAws) getSecretID(projectName, name string) string { func (b *ByocAws) PutConfig(ctx context.Context, secret 
*defangv1.PutConfigRequest) error { fqn := b.getSecretID(secret.Project, secret.Name) - term.Debugf("Putting parameter %q", fqn) + slog.Debug(fmt.Sprintf("Putting parameter %q", fqn)) err := b.driver.PutSecret(ctx, fqn, secret.Value) return AnnotateAwsError(err) } func (b *ByocAws) ListConfig(ctx context.Context, req *defangv1.ListConfigsRequest) (*defangv1.Secrets, error) { prefix := b.getSecretID(req.Project, "") - term.Debugf("Listing parameters with prefix %q", prefix) + slog.Debug(fmt.Sprintf("Listing parameters with prefix %q", prefix)) awsSecrets, err := b.driver.ListSecretsByPrefix(ctx, prefix) if err != nil { return nil, err @@ -696,7 +697,7 @@ func (b *ByocAws) QueryLogs(ctx context.Context, req *defangv1.TailRequest) (ite // if the cloud formation stack has been destroyed, we can still query // logs for builds and services if err := b.driver.FillOutputs(ctx); err != nil { - term.Warnf("Unable to show CD logs: %v", err) // TODO: could skip this warning if the user wasn't asking for CD logs + slog.Warn(fmt.Sprintf("Unable to show CD logs: %v", err)) // TODO: could skip this warning if the user wasn't asking for CD logs } cfg, err := b.driver.LoadConfig(ctx) @@ -739,7 +740,7 @@ func (b *ByocAws) QueryLogs(ctx context.Context, req *defangv1.TailRequest) (ite // Ignore ResourceNotFoundException errors which can only happen if a log stream is missing during Query var resourceNotFound *cwTypes.ResourceNotFoundException if errors.As(err, &resourceNotFound) { - term.Debugf("Log stream not found while tailing, skipping: %v", err) + slog.Debug(fmt.Sprintf("Log stream not found while tailing, skipping: %v", err)) continue } if !yield(nil, AnnotateAwsError(err)) { @@ -817,7 +818,7 @@ func (b *ByocAws) queryOrTailLogs(ctx context.Context, cwClient cw.LogsClient, r if len(req.Services) == 0 { albIter, err := b.fetchAndStreamAlbLogs(ctx, req.Project, start, end, req.Pattern) if err != nil { - term.Debugf("Failed to fetch ALB logs: %v", err) + 
slog.Debug(fmt.Sprintf("Failed to fetch ALB logs: %v", err)) } else { logSeq = cw.MergeLogEvents(logSeq, albIter) if req.Limit > 0 { @@ -850,7 +851,7 @@ func (b *ByocAws) getLogGroupInputs(etag types.ETag, projectName, service, filte // Tail CD and builds if logType.Has(logs.LogTypeCD) { if b.driver.LogGroupARN == "" { - term.Debug("CD stack LogGroupARN is not set; skipping CD logs") + slog.Debug("CD stack LogGroupARN is not set; skipping CD logs") } else { cdTail := cw.LogGroupInput{LogGroupARN: b.driver.LogGroupARN, LogEventFilterPattern: pattern} // If we know the CD task ARN, only tail the logstream for that CD task; FIXME: store the task ID in the project's ProjectUpdate in S3 and use that @@ -858,15 +859,15 @@ func (b *ByocAws) getLogGroupInputs(etag types.ETag, projectName, service, filte cdTail.LogStreamNames = []string{awscodebuild.GetLogStreamForBuildID(b.cdBuildId)} } groups = append(groups, cdTail) - term.Debug("Query CD logs", cdTail.LogGroupARN, cdTail.LogStreamNames, filter) + slog.Debug(fmt.Sprintln("Query CD logs", cdTail.LogGroupARN, cdTail.LogStreamNames, filter)) } } if logType.Has(logs.LogTypeBuild) && projectName != "" { buildsTail := cw.LogGroupInput{LogGroupARN: b.makeLogGroupARN(b.StackDir(projectName, "builds")), LogEventFilterPattern: pattern} // must match logic in ecs/common.ts; TODO: filter by etag/service - term.Debug("Query builds logs", buildsTail.LogGroupARN, filter) + slog.Debug(fmt.Sprintln("Query builds logs", buildsTail.LogGroupARN, filter)) groups = append(groups, buildsTail) ecsTail := cw.LogGroupInput{LogGroupARN: b.makeLogGroupARN(b.StackDir(projectName, "ecs")), LogEventFilterPattern: pattern} // must match logic in ecs/common.ts; TODO: filter by etag/service/deploymentId - term.Debug("Query ecs events logs", ecsTail.LogGroupARN, filter) + slog.Debug(fmt.Sprintln("Query ecs events logs", ecsTail.LogGroupARN, filter)) groups = append(groups, ecsTail) } // Tail services @@ -875,7 +876,7 @@ func (b *ByocAws) 
getLogGroupInputs(etag types.ETag, projectName, service, filte if service != "" && etag != "" { servicesTail.LogStreamNamePrefix = service + "/" + service + "_" + etag } - term.Debug("Query services logs", servicesTail.LogGroupARN, servicesTail.LogStreamNamePrefix, pattern) + slog.Debug(fmt.Sprintln("Query services logs", servicesTail.LogGroupARN, servicesTail.LogStreamNamePrefix, pattern)) groups = append(groups, servicesTail) } return groups @@ -902,7 +903,7 @@ func (b *ByocAws) UpdateServiceInfo(ctx context.Context, si *defangv1.ServiceInf } func (b *ByocAws) TearDownCD(ctx context.Context) error { - term.Warn("Deleting the Defang CD cluster; currently existing stacks or configs will not be deleted, but they will be orphaned and they will need to be cleaned up manually") + slog.Warn("Deleting the Defang CD cluster; currently existing stacks or configs will not be deleted, but they will be orphaned and they will need to be cleaned up manually") return b.driver.TearDown(ctx) } @@ -933,7 +934,7 @@ func (b *ByocAws) DeleteConfig(ctx context.Context, secrets *defangv1.Secrets) e for i, name := range secrets.Names { ids[i] = b.getSecretID(secrets.Project, name) } - term.Debug("Deleting parameters", ids) + slog.Debug(fmt.Sprintln("Deleting parameters", ids)) if err := b.driver.DeleteSecrets(ctx, ids...); err != nil { return AnnotateAwsError(err) } @@ -962,7 +963,7 @@ func (b *ByocAws) CdList(ctx context.Context, allRegions bool) (iter.Seq[state.I func (b *ByocAws) Subscribe(ctx context.Context, req *defangv1.SubscribeRequest) (iter.Seq2[*defangv1.SubscribeResponse, error], error) { if err := b.driver.FillOutputs(ctx); err != nil { - term.Warnf("Unable to get log group ARNs: %v", err) + slog.Warn(fmt.Sprintf("Unable to get log group ARNs: %v", err)) } cfg, err := b.driver.LoadConfig(ctx) diff --git a/src/pkg/cli/client/byoc/aws/byoc_test.go b/src/pkg/cli/client/byoc/aws/byoc_test.go index 11a3c7e6e..8713138b3 100644 --- a/src/pkg/cli/client/byoc/aws/byoc_test.go +++ 
b/src/pkg/cli/client/byoc/aws/byoc_test.go @@ -8,6 +8,7 @@ import ( "encoding/json" "fmt" "io" + "log/slog" "os" "path/filepath" "strings" @@ -360,6 +361,7 @@ aws_secret_access_key = wJalrXUtnFEMI/KDEFANG/bPxRfiCYEXAMPLEKEY for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { stdout, _ := term.SetupTestTerm(t) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) if tt.configFiles { // Point AWS SDK to our fake config files diff --git a/src/pkg/cli/client/byoc/aws/domain.go b/src/pkg/cli/client/byoc/aws/domain.go index 254595d43..c8cd1cda4 100644 --- a/src/pkg/cli/client/byoc/aws/domain.go +++ b/src/pkg/cli/client/byoc/aws/domain.go @@ -3,11 +3,12 @@ package aws import ( "context" "errors" + "fmt" + "log/slog" "strings" "github.com/DefangLabs/defang/src/pkg/clouds/aws" "github.com/DefangLabs/defang/src/pkg/dns" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/aws/aws-sdk-go-v2/service/route53/types" ) @@ -27,7 +28,7 @@ func prepareDomainDelegation(ctx context.Context, projectDomain, projectName, st if !errors.Is(err, aws.ErrZoneNotFound) { return nil, "", err // TODO: we should not fail deployment if GetHostedZonesByName fails } - term.Debugf("Zone %q not found, delegation set will be created", projectDomain) + slog.Debug(fmt.Sprintf("Zone %q not found, delegation set will be created", projectDomain)) } else { // Case 2: Get the NS records for the existing subdomain zone delegationSet, err = getOrCreateDelegationSetByZones(ctx, zones, projectName, stackName, r53Client) @@ -42,10 +43,10 @@ func prepareDomainDelegation(ctx context.Context, projectDomain, projectName, st // but this is acceptable because the next time the zone is deployed, we'll get the existing delegation set from the zone. 
delegationSet, err = findUsableDelegationSet(ctx, projectDomain, r53Client, resolverAt) if err != nil { - term.Warnf("Failed to find existing usable delegation set: %v, creating a new one", err) + slog.Warn(fmt.Sprintf("Failed to find existing usable delegation set: %v, creating a new one", err)) } if delegationSet != nil { - term.Debug("Reusing existing usable Route53 delegation set:", *delegationSet.Id) + slog.Debug(fmt.Sprintln("Reusing existing usable Route53 delegation set:", *delegationSet.Id)) } else { delegationSet, err = createUsableDelegationSet(ctx, projectDomain, r53Client, resolverAt) if err != nil { @@ -58,7 +59,7 @@ func prepareDomainDelegation(ctx context.Context, projectDomain, projectName, st return nil, "", errors.New("no NS records found for the delegation set") // should not happen } if delegationSet.Id != nil { - term.Debug("Route53 delegation set ID:", *delegationSet.Id) + slog.Debug(fmt.Sprintln("Route53 delegation set ID:", *delegationSet.Id)) delegationSetId = strings.TrimPrefix(*delegationSet.Id, "/delegationset/") } @@ -87,7 +88,7 @@ func findUsableDelegationSet(ctx context.Context, domain string, r53Client aws.R if len(hostedZones) >= 100 { // A delegation set can only be associated with up to 100 hosted zones by default // (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html#limits-api-entities-hosted-zones) - term.Debugf("Delegation set %q has reached the maximum number of hosted zones (100), skipping", *delegationSet.Id) + slog.Debug(fmt.Sprintf("Delegation set %q has reached the maximum number of hosted zones (100), skipping", *delegationSet.Id)) continue } return &delegationSet, nil @@ -119,7 +120,7 @@ func createUsableDelegationSet(ctx context.Context, domain string, r53Client aws // up to 100 delegation sets can be created per account, failure is non-fatal // there is no direct actionable remedy for the user too. 
// TODO: find and reuse empty delegation sets to avoid hitting the limit - term.Debugf("Failed to delete conflicting delegation set %q: %v", *delegationSet.Id, err) + slog.Debug(fmt.Sprintf("Failed to delete conflicting delegation set %q: %v", *delegationSet.Id, err)) } } else { return delegationSet, nil @@ -137,7 +138,7 @@ func nameServersHasConflict(ctx context.Context, nameServers []string, domains [ return false, err } else if len(records) > 0 { // Records found, meaning the NS server is conflicting - term.Debugf("Name server %q has conflicting records for domain %q: %v", nsServer, domain, records) + slog.Debug(fmt.Sprintf("Name server %q has conflicting records for domain %q: %v", nsServer, domain, records)) return true, nil } } @@ -155,7 +156,7 @@ func getOrCreateDelegationSetByZones(ctx context.Context, zones []*types.HostedZ } // Ignore zones that were created by an older CLI (2a), or another way (2c) or belong to a different project/stack (2d) if tags["defang:project"] != projectName || tags["defang:stack"] != stackName { - term.Debugf("ignored zone %q as it belongs to a different project/stack (%q/%q), skipping", projectDomain, tags["defang:project"], tags["defang:stack"]) + slog.Debug(fmt.Sprintf("ignored zone %q as it belongs to a different project/stack (%q/%q), skipping", projectDomain, tags["defang:project"], tags["defang:stack"])) continue } @@ -164,7 +165,7 @@ func getOrCreateDelegationSetByZones(ctx context.Context, zones []*types.HostedZ // Create or get the reusable delegation set for the existing subdomain zone delegationSet, err = aws.CreateDelegationSet(ctx, zone.Id, r53Client) if delegationSetAlreadyReusable := new(types.DelegationSetAlreadyReusable); errors.As(err, &delegationSetAlreadyReusable) { - term.Debug("Route53 delegation set already created:", err) + slog.Debug(fmt.Sprintln("Route53 delegation set already created:", err)) delegationSet, err = aws.GetDelegationSetByZone(ctx, zone.Id, r53Client) } if err != nil { diff --git 
a/src/pkg/cli/client/byoc/aws/list.go b/src/pkg/cli/client/byoc/aws/list.go index 67462cca9..c3ed399a0 100644 --- a/src/pkg/cli/client/byoc/aws/list.go +++ b/src/pkg/cli/client/byoc/aws/list.go @@ -2,15 +2,16 @@ package aws import ( "context" + "fmt" "io" "iter" + "log/slog" "strings" "sync" "github.com/DefangLabs/defang/src/pkg/cli/client/byoc" "github.com/DefangLabs/defang/src/pkg/cli/client/byoc/state" "github.com/DefangLabs/defang/src/pkg/clouds/aws" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/aws/aws-sdk-go-v2/service/s3" s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" ) @@ -61,7 +62,7 @@ type S3Client interface { func ListPulumiStacks(ctx context.Context, s3client S3Client, bucketName string) (iter.Seq[state.PulumiState], error) { prefix := `.pulumi/stacks/` // TODO: should we filter on `projectName`? - term.Debug("Listing stacks in bucket:", bucketName) + slog.Debug(fmt.Sprintln("Listing stacks in bucket:", bucketName)) out, err := s3client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{ Bucket: &bucketName, Prefix: &prefix, @@ -85,7 +86,7 @@ func ListPulumiStacks(ctx context.Context, s3client S3Client, bucketName string) return io.ReadAll(getObjectOutput.Body) }) if err != nil { - term.Debugf("Skipping %q in bucket %s: %v", *obj.Key, bucketName, AnnotateAwsError(err)) + slog.Debug(fmt.Sprintf("Skipping %q in bucket %s: %v", *obj.Key, bucketName, AnnotateAwsError(err))) continue } if state != nil { @@ -127,7 +128,7 @@ func (b *ByocAws) listPulumiStacksAllRegions(ctx context.Context, s3client S3Cli Bucket: bucket.Name, }) if err != nil { - term.Debugf("Skipping bucket %s: failed to get location: %v", *bucket.Name, AnnotateAwsError(err)) + slog.Debug(fmt.Sprintf("Skipping bucket %s: failed to get location: %v", *bucket.Name, AnnotateAwsError(err))) continue } diff --git a/src/pkg/cli/client/byoc/aws/stream.go b/src/pkg/cli/client/byoc/aws/stream.go index af4814524..d1fcabfbf 100644 --- a/src/pkg/cli/client/byoc/aws/stream.go +++ 
b/src/pkg/cli/client/byoc/aws/stream.go @@ -2,6 +2,8 @@ package aws import ( "encoding/json" + "fmt" + "log/slog" "regexp" "slices" "strings" @@ -11,7 +13,6 @@ import ( "github.com/DefangLabs/defang/src/pkg/clouds/aws/cw" "github.com/DefangLabs/defang/src/pkg/clouds/aws/ecs" "github.com/DefangLabs/defang/src/pkg/logs" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/types" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" "google.golang.org/protobuf/types/known/timestamppb" @@ -90,7 +91,7 @@ func (p *logEventParser) parseEvents(events []cw.LogEvent) *defangv1.TailRespons } break } - term.Debugf("unrecognized log stream format: %s", *first.LogStreamName) + slog.Debug("unrecognized log stream format: " + *first.LogStreamName) return nil // skip, ignore sidecar logs (like route53-sidecar or fluentbit) } @@ -123,7 +124,7 @@ func (p *logEventParser) parseEvents(events []cw.LogEvent) *defangv1.TailRespons } else if parseECSEventRecords { evt, err := ecs.ParseECSEvent([]byte(*event.Message)) if err != nil { - term.Debugf("error parsing ECS event, output raw event log: %v", err) + slog.Debug(fmt.Sprintf("error parsing ECS event, output raw event log: %v", err)) } else { entry.Service = evt.Service() entry.Etag = evt.Etag() diff --git a/src/pkg/cli/client/byoc/aws/subscribe.go b/src/pkg/cli/client/byoc/aws/subscribe.go index 9d5aa29aa..e61de1dde 100644 --- a/src/pkg/cli/client/byoc/aws/subscribe.go +++ b/src/pkg/cli/client/byoc/aws/subscribe.go @@ -1,14 +1,15 @@ package aws import ( + "fmt" "iter" + "log/slog" "slices" "strings" "github.com/DefangLabs/defang/src/pkg/clouds/aws/codebuild" "github.com/DefangLabs/defang/src/pkg/clouds/aws/cw" "github.com/DefangLabs/defang/src/pkg/clouds/aws/ecs" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/types" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" ) @@ -54,7 +55,7 @@ func parseSubscribeEvent(evt cw.LogEvent, etag types.ETag, 
services []string) *d func parseECSSubscribeEvent(evt cw.LogEvent, etag types.ETag, services []string) *defangv1.SubscribeResponse { ecsEvt, err := ecs.ParseECSEvent([]byte(*evt.Message)) if err != nil { - term.Debugf("error parsing ECS event: %v", err) + slog.Debug(fmt.Sprintf("error parsing ECS event: %v", err)) return nil } diff --git a/src/pkg/cli/client/byoc/baseclient.go b/src/pkg/cli/client/byoc/baseclient.go index ca816b12d..0310056da 100644 --- a/src/pkg/cli/client/byoc/baseclient.go +++ b/src/pkg/cli/client/byoc/baseclient.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "iter" + "log/slog" "strings" "github.com/DefangLabs/defang/src/pkg" @@ -13,7 +14,6 @@ import ( "github.com/DefangLabs/defang/src/pkg/cli/compose" "github.com/DefangLabs/defang/src/pkg/dns" "github.com/DefangLabs/defang/src/pkg/stacks" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/types" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" composeTypes "github.com/compose-spec/compose-go/v2/types" @@ -109,7 +109,7 @@ func (b *ByocBaseClient) RemoteProjectName(ctx context.Context) (string, error) if len(projectNames) > 1 { return "", ErrMultipleProjects{ProjectNames: projectNames} } - term.Debug("Using default project:", projectNames[0]) + slog.Debug(fmt.Sprintln("Using default project:", projectNames[0])) return projectNames[0], nil } diff --git a/src/pkg/cli/client/byoc/common.go b/src/pkg/cli/client/byoc/common.go index 224c628c2..407a2757f 100644 --- a/src/pkg/cli/client/byoc/common.go +++ b/src/pkg/cli/client/byoc/common.go @@ -3,13 +3,14 @@ package byoc import ( "context" "errors" + "fmt" + "log/slog" "os" "os/exec" "path/filepath" "strings" "github.com/DefangLabs/defang/src/pkg" - "github.com/DefangLabs/defang/src/pkg/term" ) const ( @@ -44,7 +45,7 @@ func GetPulumiBackend(stateUrl string) (string, string, error) { } func runLocalCommand(ctx context.Context, dir string, env []string, cmd ...string) error { - term.Debug("Running local 
command `", cmd, "` in dir ", dir) + slog.Debug(fmt.Sprintln("Running local command `", cmd, "` in dir ", dir)) // TODO - use enums to define commands instead of passing strings down from the caller // #nosec G204 command := exec.CommandContext(ctx, cmd[0], cmd[1:]...) @@ -58,7 +59,7 @@ func runLocalCommand(ctx context.Context, dir string, env []string, cmd ...strin func DebugPulumiNodeJS(ctx context.Context, env []string, cmd ...string) error { // Locally we use the "dev" script from package.json to run Pulumi commands, which uses ts-node localCmd := append([]string{"npm", "run", "dev"}, cmd...) - term.Debug(strings.Join(append(env, localCmd...), " ")) + slog.Debug(strings.Join(append(env, localCmd...), " ")) dir := os.Getenv("DEFANG_PULUMI_DIR") if dir == "" { @@ -79,7 +80,7 @@ func DebugPulumiNodeJS(ctx context.Context, env []string, cmd ...string) error { func DebugPulumiGolang(ctx context.Context, env []string, cmd ...string) error { localCmd := append([]string{"go", "run", "./..."}, cmd...) 
- term.Debug(strings.Join(append(env, localCmd...), " ")) + slog.Debug(strings.Join(append(env, localCmd...), " ")) dir := os.Getenv("DEFANG_PULUMI_DIR") if dir == "" { diff --git a/src/pkg/cli/client/byoc/do/byoc.go b/src/pkg/cli/client/byoc/do/byoc.go index e23d183e3..feb844713 100644 --- a/src/pkg/cli/client/byoc/do/byoc.go +++ b/src/pkg/cli/client/byoc/do/byoc.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "io" + "log/slog" "iter" "net/url" @@ -109,7 +110,7 @@ func (b *ByocDo) GetProjectUpdate(ctx context.Context, projectName string) (*def if err != nil { if aws.IsS3NoSuchKeyError(err) { - term.Debug("s3.GetObject:", err) + slog.Debug(fmt.Sprintln("s3.GetObject:", err)) return nil, client.ErrNotExist // no services yet } return nil, awsbyoc.AnnotateAwsError(err) @@ -426,7 +427,7 @@ func (b *ByocDo) QueryLogs(ctx context.Context, req *defangv1.TailRequest) (iter if deploymentID == "" || appID == "" { //Look up the CD app directly instead of relying on the etag - term.Debug("Fetching app and deployment ID for app", appPlatform.CdName) + slog.Debug(fmt.Sprintln("Fetching app and deployment ID for app", appPlatform.CdName)) cdApp, err := b.getAppByName(ctx, appPlatform.CdName) if err != nil { return nil, err @@ -446,7 +447,7 @@ func (b *ByocDo) QueryLogs(ctx context.Context, req *defangv1.TailRequest) (iter return nil, errors.New("no deployments found") } - term.Info("Waiting for CD command to finish gathering logs") + slog.Info("Waiting for CD command to finish gathering logs") for { deploymentInfo, _, err := b.client.Apps.GetDeployment(ctx, appID, deploymentID) if err != nil { @@ -455,7 +456,7 @@ func (b *ByocDo) QueryLogs(ctx context.Context, req *defangv1.TailRequest) (iter logType := logs.LogType(req.LogType) - term.Debugf("Deployment phase: %s", deploymentInfo.GetPhase()) + slog.Debug(fmt.Sprintf("Deployment phase: %s", deploymentInfo.GetPhase())) switch deploymentInfo.GetPhase() { case godo.DeploymentPhase_PendingBuild, godo.DeploymentPhase_PendingDeploy, 
godo.DeploymentPhase_Deploying: // Do nothing; check again in 10 seconds @@ -496,7 +497,7 @@ func (b *ByocDo) QueryLogs(ctx context.Context, req *defangv1.TailRequest) (iter } func (b *ByocDo) TearDownCD(ctx context.Context) error { - term.Warn("Deleting the Defang CD app; currently existing stacks or configs will not be deleted, but they will be orphaned and they will need to be cleaned up manually") + slog.Warn("Deleting the Defang CD app; currently existing stacks or configs will not be deleted, but they will be orphaned and they will need to be cleaned up manually") app, err := b.getAppByName(ctx, appPlatform.CdName) if err != nil { return err @@ -699,7 +700,7 @@ func (b *ByocDo) SetUpCD(ctx context.Context, force bool) error { if resp.StatusCode != 404 { return err } - term.Debug("Creating new registry") + slog.Debug("Creating new registry") // Create registry if it doesn't exist registry, _, err = b.client.Registry.Create(ctx, &godo.RegistryCreateRequest{ Name: pkg.RandomID(), // has to be globally unique diff --git a/src/pkg/cli/client/byoc/gcp/byoc.go b/src/pkg/cli/client/byoc/gcp/byoc.go index 47445ff96..4161e2cd0 100644 --- a/src/pkg/cli/client/byoc/gcp/byoc.go +++ b/src/pkg/cli/client/byoc/gcp/byoc.go @@ -6,6 +6,7 @@ import ( "encoding/base64" "errors" "fmt" + "log/slog" "iter" "os" @@ -174,7 +175,7 @@ func (b *ByocGcp) SetUpCD(ctx context.Context, force bool) error { } // TODO: Handle project creation flow - term.Infof("Setting up defang CD in GCP project %s, this could take a few minutes", b.driver.GetProjectID()) + slog.Info(fmt.Sprintf("Setting up defang CD in GCP project %s, this could take a few minutes", b.driver.GetProjectID())) // 1. 
Enable required APIs // TODO: enable minimum APIs needed for bootstrap the cd image, let CD enable the rest of the APIs apis := []string{ @@ -282,7 +283,7 @@ func (b *ByocGcp) SetUpCD(ctx context.Context, force bool) error { } } - term.Debugf("Using CD image: %q", b.CDImage) + slog.Debug(fmt.Sprintf("Using CD image: %q", b.CDImage)) b.SetupDone = true return nil @@ -310,7 +311,7 @@ func (b *ByocGcp) CdList(ctx context.Context, _allRegions bool) (iter.Seq[state. prefix := `.pulumi/stacks/` // TODO: should we filter on `projectName`? uploadSA := b.driver.GetServiceAccountEmail(DefangUploadServiceAccountName) - term.Debug("Getting services from pulumi stacks bucket:", bucketName, prefix, uploadSA) + slog.Debug(fmt.Sprintln("Getting services from pulumi stacks bucket:", bucketName, prefix, uploadSA)) objLoader := func(ctx context.Context, bucket, object string) ([]byte, error) { return b.driver.GetBucketObjectWithServiceAccount(ctx, bucket, object, uploadSA) } @@ -321,12 +322,12 @@ func (b *ByocGcp) CdList(ctx context.Context, _allRegions bool) (iter.Seq[state. 
return func(yield func(state.Info) bool) { for obj, err := range seq { if err != nil { - term.Debugf("Error listing object in bucket %s: %v", bucketName, annotateGcpError(err)) + slog.Debug(fmt.Sprintf("Error listing object in bucket %s: %v", bucketName, annotateGcpError(err))) continue } st, err := state.ParsePulumiStateFile(ctx, gcpObj{obj}, bucketName, objLoader) if err != nil { - term.Debugf("Skipping %q in bucket %s: %v", obj.Name, bucketName, annotateGcpError(err)) + slog.Debug(fmt.Sprintf("Skipping %q in bucket %s: %v", obj.Name, bucketName, annotateGcpError(err))) continue } if st == nil { @@ -487,7 +488,7 @@ func (b *ByocGcp) runCdCommand(ctx context.Context, cmd cdCommand) (string, erro if err != nil { return "", err } - term.Debugf("Starting CD in cloudbuild at: %v", time.Now().Format(time.RFC3339)) + slog.Debug(fmt.Sprintf("Starting CD in cloudbuild at: %v", time.Now().Format(time.RFC3339))) buildId, err := b.driver.RunCloudBuild(ctx, gcp.CloudBuildArgs{ Steps: string(steps), ServiceAccount: &b.cdServiceAccount, @@ -690,7 +691,7 @@ func (e ConflictDelegateDomainError) Error() string { } func (b *ByocGcp) PrepareDomainDelegation(ctx context.Context, req client.PrepareDomainDelegationRequest) (*client.PrepareDomainDelegationResponse, error) { - term.Debugf("Preparing domain delegation for %s", req.DelegateDomain) + slog.Debug("Preparing domain delegation for " + req.DelegateDomain) name := "defang-" + dns.SafeLabel(req.DelegateDomain) if zone, err := b.driver.EnsureDNSZoneExists(ctx, name, req.DelegateDomain, "defang delegate domain"); err != nil { if apiErr := new(googleapi.Error); errors.As(err, &apiErr) { @@ -710,7 +711,7 @@ func (b *ByocGcp) PrepareDomainDelegation(ctx context.Context, req client.Prepar return nil, annotateGcpError(err) } else { b.delegateDomainZone = zone.Name - term.Debugf("Zone %s created with nameservers %v", zone.Name, zone.NameServers) + slog.Debug(fmt.Sprintf("Zone %s created with nameservers %v", zone.Name, zone.NameServers)) 
return &client.PrepareDomainDelegationResponse{ NameServers: zone.NameServers, }, nil @@ -720,7 +721,7 @@ func (b *ByocGcp) PrepareDomainDelegation(ctx context.Context, req client.Prepar func (b *ByocGcp) DeleteConfig(ctx context.Context, req *defangv1.Secrets) error { for _, name := range req.Names { secretId := b.resourceName(req.Project, name) - term.Debugf("Deleting secret %q", secretId) + slog.Debug(fmt.Sprintf("Deleting secret %q", secretId)) if err := b.driver.DeleteSecret(ctx, secretId); err != nil { return fmt.Errorf("failed to delete secret %q: %w", secretId, err) } @@ -749,7 +750,7 @@ func (b *ByocGcp) ListConfig(ctx context.Context, req *defangv1.ListConfigsReque func (b *ByocGcp) PutConfig(ctx context.Context, req *defangv1.PutConfigRequest) error { secretId := b.resourceName(req.Project, req.Name) - term.Debugf("Creating secret %q", secretId) + slog.Debug(fmt.Sprintf("Creating secret %q", secretId)) if _, err := b.driver.CreateSecret(ctx, secretId); err != nil { if gcp.IsAccessNotEnabled(err) { @@ -760,13 +761,13 @@ func (b *ByocGcp) PutConfig(ctx context.Context, req *defangv1.PutConfigRequest) } if err != nil { if stat, ok := status.FromError(err); ok && stat.Code() == codes.AlreadyExists { - term.Debugf("Secret %q already exists", secretId) + slog.Debug(fmt.Sprintf("Secret %q already exists", secretId)) } else { return fmt.Errorf("failed to create secret %q: %w", secretId, err) } } } - term.Debugf("Adding a new secret version for %q", secretId) + slog.Debug(fmt.Sprintf("Adding a new secret version for %q", secretId)) if _, err := b.driver.AddSecretVersion(ctx, secretId, []byte(req.Value)); err != nil { return fmt.Errorf("failed to add secret version for %q: %w", secretId, err) } @@ -821,7 +822,7 @@ func LogEntriesToString(logEntries []*loggingpb.LogEntry) string { } func (b *ByocGcp) TearDownCD(ctx context.Context) error { - // term.Warn("Deleting Defang CD; currently existing stacks or configs will not be deleted, but they will be orphaned and 
they will need to be cleaned up manually") + // slog.Warn("Deleting Defang CD; currently existing stacks or configs will not be deleted, but they will be orphaned and they will need to be cleaned up manually") // FIXME: implement return client.ErrNotImplemented("GCP TearDown") } @@ -842,10 +843,10 @@ func (b *ByocGcp) GetProjectUpdate(ctx context.Context, projectName string) (*de // Current user might not have object viewer access to the bucket, use the upload service account to get the object uploadSA := b.driver.GetServiceAccountEmail(DefangUploadServiceAccountName) - term.Debug("Getting services from bucket:", bucketName, path, uploadSA) + slog.Debug(fmt.Sprintln("Getting services from bucket:", bucketName, path, uploadSA)) pbBytes, err := b.driver.GetBucketObjectWithServiceAccount(ctx, bucketName, path, uploadSA) if err != nil { - term.Debugf("Failed to get project bucket object from bucket %q at path %q with service account %q: %v", bucketName, path, uploadSA, err) + slog.Debug(fmt.Sprintf("Failed to get project bucket object from bucket %q at path %q with service account %q: %v", bucketName, path, uploadSA, err)) // Handle the case where the object does not exist, or where we do not have permission to view the object, ie. // "Permission 'iam.serviceAccounts.getAccessToken' denied on resource (or it may not exist)." 
#2051 if errors.Is(err, gcp.ErrObjectNotExist) || strings.Contains(err.Error(), "(or it may not exist)") { diff --git a/src/pkg/cli/client/byoc/gcp/stream.go b/src/pkg/cli/client/byoc/gcp/stream.go index 6e3de50b8..bcc33840a 100644 --- a/src/pkg/cli/client/byoc/gcp/stream.go +++ b/src/pkg/cli/client/byoc/gcp/stream.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "iter" + "log/slog" "path" "regexp" "strings" @@ -14,7 +15,6 @@ import ( "cloud.google.com/go/logging/apiv2/loggingpb" "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/clouds/gcp" - "github.com/DefangLabs/defang/src/pkg/term" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" auditpb "google.golang.org/genproto/googleapis/cloud/audit" "google.golang.org/grpc/codes" @@ -71,7 +71,7 @@ func (s *ServerStream[T]) Follow(start time.Time) (iter.Seq2[*T, error], error) } query := s.query.GetQuery() shouldList := !start.IsZero() && start.Unix() > 0 && time.Since(start) > 10*time.Millisecond - term.Debugf("Query and tail logs since %v with query: \n%v", start, query) + slog.Debug(fmt.Sprintf("Query and tail logs since %v with query: \n%v", start, query)) return func(yield func(*T, error) bool) { defer tailer.Close() // Only query older logs if start time is more than 10ms ago @@ -126,7 +126,7 @@ func (s *ServerStream[T]) Follow(start time.Time) (iter.Seq2[*T, error], error) // Head returns an iterator that queries logs in ascending order. func (s *ServerStream[T]) Head(limit int32) iter.Seq2[*T, error] { query := s.query.GetQuery() - term.Debugf("Query logs with query: \n%v", query) + slog.Debug(fmt.Sprintf("Query logs with query: \n%v", query)) return func(yield func(*T, error) bool) { lister, err := s.gcpLogsClient.ListLogEntries(s.ctx, query, gcp.OrderAscending) if err != nil { @@ -140,7 +140,7 @@ func (s *ServerStream[T]) Head(limit int32) iter.Seq2[*T, error] { // Tail returns an iterator that queries logs in descending order, reversing if a limit is set. 
func (s *ServerStream[T]) Tail(limit int32) iter.Seq2[*T, error] { query := s.query.GetQuery() - term.Debugf("Query logs with query: \n%v", query) + slog.Debug(fmt.Sprintf("Query logs with query: \n%v", query)) return func(yield func(*T, error) bool) { lister, err := s.gcpLogsClient.ListLogEntries(s.ctx, query, gcp.OrderDescending) if err != nil { @@ -486,13 +486,13 @@ func getActivityParser(ctx context.Context, gcpLogsClient GcpLogsClient, waitFor } if entry.GetProtoPayload().GetTypeUrl() != "type.googleapis.com/google.cloud.audit.AuditLog" { - term.Warnf("unexpected log entry type : %v", entry.GetProtoPayload().GetTypeUrl()) + slog.Warn(fmt.Sprintf("unexpected log entry type : %v", entry.GetProtoPayload().GetTypeUrl())) return nil, nil } auditLog := new(auditpb.AuditLog) if err := entry.GetProtoPayload().UnmarshalTo(auditLog); err != nil { - term.Warnf("failed to unmarshal audit log : %v", err) + slog.Warn(fmt.Sprintf("failed to unmarshal audit log : %v", err)) return nil, nil } @@ -528,7 +528,7 @@ func getActivityParser(ctx context.Context, gcpLogsClient GcpLogsClient, waitFor Status: status.GetMessage(), }}, nil } else { - term.Warnf("missing request and response in audit log for service %v", path.Base(auditLog.GetResourceName())) + slog.Warn(fmt.Sprintf("missing request and response in audit log for service %v", path.Base(auditLog.GetResourceName()))) return nil, nil } @@ -551,7 +551,7 @@ func getActivityParser(ctx context.Context, gcpLogsClient GcpLogsClient, waitFor serviceName := GetValueInStruct(response, "spec.template.metadata.labels.defang-service") status := auditLog.GetStatus() if status == nil { - term.Warnf("missing status in audit log for job %v", path.Base(auditLog.GetResourceName())) + slog.Warn(fmt.Sprintf("missing status in audit log for job %v", path.Base(auditLog.GetResourceName()))) return nil, nil } var state defangv1.ServiceState @@ -579,7 +579,7 @@ func getActivityParser(ctx context.Context, gcpLogsClient GcpLogsClient, waitFor // Report 
all ready services when CD is successful, prevents cli deploy stop before cd is done return getReadyServicesCompletedResps(auditLog.GetStatus().GetMessage()), nil // Ignore success cd status when we are waiting for service status } else { - term.Warnf("unexpected execution name in audit log : %v", executionName) + slog.Warn(fmt.Sprintf("unexpected execution name in audit log : %v", executionName)) return nil, nil } case "gce_instance_group_manager": // Compute engine update start @@ -591,24 +591,24 @@ func getActivityParser(ctx context.Context, gcpLogsClient GcpLogsClient, waitFor managerName := entry.Resource.Labels["instance_group_manager_name"] labels, err := gcpLogsClient.GetInstanceGroupManagerLabels(ctx, project, region, managerName) if err != nil { - term.Warnf("failed to get instance group manager labels for %v: %v", managerName, err) + slog.Warn(fmt.Sprintf("failed to get instance group manager labels for %v: %v", managerName, err)) return nil, nil } serviceName := labels["defang-service"] if serviceName == "" { - term.Warnf("missing defang-service label in instance group manager %v", managerName) + slog.Warn(fmt.Sprintf("missing defang-service label in instance group manager %v", managerName)) return nil, nil } if etag != "" { labelEtag := labels["defang-etag"] if labelEtag != etag { - term.Warnf("skipping instance group manager %v: etag mismatch (got %q, want %q)", managerName, labelEtag, etag) + slog.Warn(fmt.Sprintf("skipping instance group manager %v: etag mismatch (got %q, want %q)", managerName, labelEtag, etag)) return nil, nil } } rootTriggerId := entry.GetLabels()["compute.googleapis.com/root_trigger_id"] if rootTriggerId == "" { - term.Warnf("missing root_trigger_id in audit log for instance group manager %v", path.Base(auditLog.GetResourceName())) + slog.Warn(fmt.Sprintf("missing root_trigger_id in audit log for instance group manager %v", path.Base(auditLog.GetResourceName()))) } else { computeEngineRootTriggers[rootTriggerId] = serviceName } 
@@ -622,12 +622,12 @@ func getActivityParser(ctx context.Context, gcpLogsClient GcpLogsClient, waitFor rootTriggerId := entry.GetLabels()["compute.googleapis.com/root_trigger_id"] serviceName, ok := computeEngineRootTriggers[rootTriggerId] if !ok { - term.Debugf("ignored root trigger id %v for instance group insert", rootTriggerId) + slog.Debug(fmt.Sprintf("ignored root trigger id %v for instance group insert", rootTriggerId)) return nil, nil } response := auditLog.GetResponse() if response == nil { - term.Warnf("missing response in audit log for instance group %v", path.Base(auditLog.GetResourceName())) + slog.Warn(fmt.Sprintf("missing response in audit log for instance group %v", path.Base(auditLog.GetResourceName()))) return nil, nil } status := response.GetFields()["status"].GetStringValue() @@ -653,7 +653,7 @@ func getActivityParser(ctx context.Context, gcpLogsClient GcpLogsClient, waitFor } bt, err := gcpLogsClient.GetBuildInfo(ctx, buildId) // TODO: Cache the build IDs? if err != nil { - term.Warnf("failed to get build tag for build %v: %v", buildId, err) + slog.Warn(fmt.Sprintf("failed to get build tag for build %v: %v", buildId, err)) return nil, nil } @@ -707,7 +707,7 @@ func getActivityParser(ctx context.Context, gcpLogsClient GcpLogsClient, waitFor }}, nil } default: - term.Warnf("unexpected resource type : %v", entry.Resource.Type) + slog.Warn(fmt.Sprintf("unexpected resource type : %v", entry.Resource.Type)) return nil, nil } } diff --git a/src/pkg/cli/client/byoc/state/parse.go b/src/pkg/cli/client/byoc/state/parse.go index 76e19b37e..617b753da 100644 --- a/src/pkg/cli/client/byoc/state/parse.go +++ b/src/pkg/cli/client/byoc/state/parse.go @@ -4,11 +4,11 @@ import ( "context" "encoding/json" "fmt" + "log/slog" "path" "strconv" "strings" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/types" ) @@ -86,12 +86,12 @@ func ParsePulumiStateFile(ctx context.Context, obj BucketObj, bucket string, obj Name: 
path.Base(stackFile), // legacy logic to derive stack name from file name } if state.Version != 3 { - term.Debug("Skipping Pulumi state with version", state.Version) + slog.Debug(fmt.Sprintln("Skipping Pulumi state with version", state.Version)) } else if len(state.Checkpoint.Latest.PendingOperations) > 0 { for _, op := range state.Checkpoint.Latest.PendingOperations { parts := strings.Split(op.Resource.Urn, "::") // prefix::project::type::resource => {urn:provider:stack}::{project}::{plugin:file:class}::{name} if len(parts) < 4 { - term.Debug("Skipping pending operation with malformed URN:", op.Resource.Urn) + slog.Debug(fmt.Sprintln("Skipping pending operation with malformed URN:", op.Resource.Urn)) continue } stack.Pending = append(stack.Pending, parts[3]) diff --git a/src/pkg/cli/client/caniuse.go b/src/pkg/cli/client/caniuse.go index eb9d5b58d..f1effdadc 100644 --- a/src/pkg/cli/client/caniuse.go +++ b/src/pkg/cli/client/caniuse.go @@ -3,9 +3,10 @@ package client import ( "context" "errors" + "fmt" + "log/slog" "os" - "github.com/DefangLabs/defang/src/pkg/term" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" ) @@ -65,24 +66,24 @@ type versionLabel string // resolveVersion picks the version to use: env override > force upgrade > allow upgrade > pin to previous > latest. 
func resolveVersion(fromEnv, fromFabric, previous string, label versionLabel, allowUpgrade bool, forcedReason string) string { if fromEnv != "" { - term.Debugf("Using %s from env: %s", label, fromEnv) + slog.Debug(fmt.Sprintf("Using %s from env: %s", label, fromEnv)) return fromEnv } if previous == "" || fromFabric == previous { - term.Debugf("Using %s: %s", label, fromFabric) + slog.Debug(fmt.Sprintf("Using %s: %s", label, fromFabric)) return fromFabric } if forcedReason != "" { - term.Debugf("Using %s from fabric: %s", label, fromFabric) - term.Warnf("Overriding %s: %s", label, forcedReason) + slog.Debug(fmt.Sprintf("Using %s from fabric: %s", label, fromFabric)) + slog.Warn(fmt.Sprintf("Overriding %s: %s", label, forcedReason)) return fromFabric } if allowUpgrade { - term.Debugf("Using latest %s: %s", label, fromFabric) - term.Infof("Upgrading %s to latest", label) + slog.Debug(fmt.Sprintf("Using latest %s: %s", label, fromFabric)) + slog.Info(fmt.Sprintf("Upgrading %s to latest", label)) return fromFabric } - term.Debugf("Using previous %s: %s", label, previous) - term.Warnf("A newer %s is available; using previously deployed version. To upgrade, re-run with --allow-upgrade or set DEFANG_ALLOW_UPGRADE=1", label) + slog.Debug(fmt.Sprintf("Using previous %s: %s", label, previous)) + slog.Warn(fmt.Sprintf("A newer %s is available; using previously deployed version. 
To upgrade, re-run with --allow-upgrade or set DEFANG_ALLOW_UPGRADE=1", label)) return previous } diff --git a/src/pkg/cli/client/cluster.go b/src/pkg/cli/client/cluster.go index 174c85368..4e64bba3d 100644 --- a/src/pkg/cli/client/cluster.go +++ b/src/pkg/cli/client/cluster.go @@ -1,13 +1,14 @@ package client import ( + "fmt" + "log/slog" "net" "os" "path/filepath" "strings" "github.com/DefangLabs/defang/src/pkg" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/tokenstore" ) @@ -42,18 +43,18 @@ func GetExistingToken(fabricAddr string) string { var accessToken = os.Getenv("DEFANG_ACCESS_TOKEN") if accessToken != "" { - term.Debug("Using access token from env DEFANG_ACCESS_TOKEN") + slog.Debug("Using access token from env DEFANG_ACCESS_TOKEN") } else { var err error accessToken, err = TokenStore.Load(TokenStorageName(fabricAddr)) if err != nil { - term.Debugf("failed to load access token for %v: %v", fabricAddr, err) + slog.Debug(fmt.Sprintf("failed to load access token for %v: %v", fabricAddr, err)) } // Check if we wrote an IDToken file during login, if AWS_WEB_IDENTITY_TOKEN_FILE is empty, if os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE") == "" { if jwtPath, err := GetWebIdentityTokenFile(fabricAddr); err == nil { - term.Debugf("using web identity token from %s", jwtPath) + slog.Debug("using web identity token from " + jwtPath) // Set AWS env vars for this CLI invocation os.Setenv("AWS_WEB_IDENTITY_TOKEN_FILE", jwtPath) os.Setenv("AWS_ROLE_SESSION_NAME", "defang-cli") // TODO: from WhoAmI diff --git a/src/pkg/cli/client/grpc_logger.go b/src/pkg/cli/client/grpc_logger.go index a2b32c409..3c2d1cc86 100644 --- a/src/pkg/cli/client/grpc_logger.go +++ b/src/pkg/cli/client/grpc_logger.go @@ -3,11 +3,12 @@ package client import ( "context" "encoding/json" + "fmt" + "log/slog" "net/http" "connectrpc.com/connect" "github.com/DefangLabs/defang/src/pkg" - "github.com/DefangLabs/defang/src/pkg/term" ) const maxPayloadLength = 1024 @@ -38,7 +39,7 
@@ func (g grpcLogger) logRequest(header http.Header, reqType, payload string) { requestId := pkg.RandomID() header.Set("X-Request-Id", requestId) - term.Debug(g.prefix, requestId, reqType, payload) + slog.Debug(fmt.Sprintln(g.prefix, requestId, reqType, payload)) } func (g grpcLogger) WrapStreamingClient(next connect.StreamingClientFunc) connect.StreamingClientFunc { diff --git a/src/pkg/cli/client/playground.go b/src/pkg/cli/client/playground.go index 8ce49d51e..ccbdecb62 100644 --- a/src/pkg/cli/client/playground.go +++ b/src/pkg/cli/client/playground.go @@ -3,14 +3,15 @@ package client import ( "context" "errors" + "fmt" "io" "iter" + "log/slog" "os" "connectrpc.com/connect" byocState "github.com/DefangLabs/defang/src/pkg/cli/client/byoc/state" "github.com/DefangLabs/defang/src/pkg/dns" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/types" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" ) @@ -183,7 +184,7 @@ func (g *PlaygroundProvider) RemoteProjectName(ctx context.Context) (string, err if resp.Project == "" { return "", errors.New("no Playground projects found") } - term.Debug("Using default Playground project: ", resp.Project) + slog.Debug(fmt.Sprintln("Using default Playground project: ", resp.Project)) return resp.Project, nil } diff --git a/src/pkg/cli/client/pretty_error.go b/src/pkg/cli/client/pretty_error.go index ff17ca3a8..ba7911f7e 100644 --- a/src/pkg/cli/client/pretty_error.go +++ b/src/pkg/cli/client/pretty_error.go @@ -3,17 +3,17 @@ package client import ( "errors" "fmt" + "log/slog" "strings" "connectrpc.com/connect" - "github.com/DefangLabs/defang/src/pkg/term" ) func PrettyError(err error) error { // To avoid printing the internal gRPC error code var cerr *connect.Error if errors.As(err, &cerr) { - term.Debug("Server error:", cerr) + slog.Debug(fmt.Sprintln("Server error:", cerr)) err = errors.Unwrap(cerr) } if IsNetworkError(err) { diff --git a/src/pkg/cli/client/projectName.go 
b/src/pkg/cli/client/projectName.go index 78958a68c..09520dacb 100644 --- a/src/pkg/cli/client/projectName.go +++ b/src/pkg/cli/client/projectName.go @@ -3,8 +3,7 @@ package client import ( "context" "fmt" - - "github.com/DefangLabs/defang/src/pkg/term" + "log/slog" ) // Deprecated: should use stacks instead of ProjectName fallback. @@ -15,10 +14,10 @@ func LoadProjectNameWithFallback(ctx context.Context, loader Loader, provider Pr if err == nil { return projectName, nil } - term.Debug("Failed to load local project:", err) + slog.Debug(fmt.Sprintln("Failed to load local project:", err)) loadErr = err } - term.Debug("Trying to get the remote project name from the provider") + slog.Debug("Trying to get the remote project name from the provider") projectName, err := provider.RemoteProjectName(ctx) if err != nil { return "", fmt.Errorf("%w and %w", loadErr, err) diff --git a/src/pkg/cli/common.go b/src/pkg/cli/common.go index 2b2163d66..07b58a996 100644 --- a/src/pkg/cli/common.go +++ b/src/pkg/cli/common.go @@ -3,6 +3,7 @@ package cli import ( "context" "encoding/json" + "log/slog" "os" "github.com/DefangLabs/defang/src/pkg/cli/client" @@ -91,7 +92,7 @@ func putDeploymentAndStack(ctx context.Context, provider client.Provider, fabric if err != nil { return err } - term.Debugf("Deployment origin metadata: %s", string(originMetadataBytes)) + slog.Debug("Deployment origin metadata: " + string(originMetadataBytes)) } return fabric.PutDeployment(ctx, &defangv1.PutDeploymentRequest{ diff --git a/src/pkg/cli/compose/baseimage.go b/src/pkg/cli/compose/baseimage.go index 4b10a0519..858b174e3 100644 --- a/src/pkg/cli/compose/baseimage.go +++ b/src/pkg/cli/compose/baseimage.go @@ -2,12 +2,12 @@ package compose import ( "fmt" + "log/slog" "maps" "os" "path/filepath" "slices" - "github.com/DefangLabs/defang/src/pkg/term" composeTypes "github.com/compose-spec/compose-go/v2/types" "github.com/moby/buildkit/frontend/dockerfile/instructions" 
"github.com/moby/buildkit/frontend/dockerfile/parser" @@ -25,7 +25,7 @@ func FindAllBaseImages(project *composeTypes.Project) ([]string, error) { images, err := extractDockerfileBaseImages(dockerfileFullPath) if err != nil { if os.IsNotExist(err) { - term.Debugf("service %q: dockerfile %q does not exist; skipping", service.Name, dockerfileFullPath) + slog.Debug(fmt.Sprintf("service %q: dockerfile %q does not exist; skipping", service.Name, dockerfileFullPath)) continue } return nil, err diff --git a/src/pkg/cli/compose/compose_test.go b/src/pkg/cli/compose/compose_test.go index 7c3698ee9..a24336e8f 100644 --- a/src/pkg/cli/compose/compose_test.go +++ b/src/pkg/cli/compose/compose_test.go @@ -2,9 +2,11 @@ package compose import ( "bytes" + "log/slog" "os" "testing" + "github.com/DefangLabs/defang/src/pkg/logs" "github.com/DefangLabs/defang/src/pkg/term" ) @@ -189,6 +191,7 @@ func TestComposeGoNoDoubleWarningLog(t *testing.T) { var warnings bytes.Buffer term.DefaultTerm = term.NewTerm(os.Stdin, &warnings, &warnings) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) loader := NewLoader(WithPath("../../../testdata/compose-go-warn/compose.yaml")) _, err := loader.LoadProject(t.Context()) diff --git a/src/pkg/cli/compose/context.go b/src/pkg/cli/compose/context.go index 40d1106a1..86cd26b91 100644 --- a/src/pkg/cli/compose/context.go +++ b/src/pkg/cli/compose/context.go @@ -11,6 +11,7 @@ import ( "fmt" "io" "io/fs" + "log/slog" "os" "path/filepath" "strings" @@ -219,7 +220,7 @@ func getRemoteBuildContext(ctx context.Context, provider client.Provider, projec return fmt.Sprintf("s3://cd-preview/%s%s", service, archiveType.Extension), nil } - term.Info("Packaging the project files for", service, "at", root) + slog.Info(fmt.Sprintln("Packaging the project files for", service, "at", root)) buffer, err := createArchive(ctx, build.Context, build.Dockerfile, archiveType) if err != nil { return "", err @@ -230,7 +231,7 @@ func getRemoteBuildContext(ctx context.Context, 
provider client.Provider, projec case UploadModeDefault, UploadModeDigest: // Calculate the digest of the tarball and pass it to the fabric controller (to avoid building the same image twice) digest = calcDigest(buffer.Bytes()) - term.Debugf("Digest for %q: %s", service, digest) + slog.Debug(fmt.Sprintf("Digest for %q: %s", service, digest)) case UploadModePreview: // For preview, we invoke the CD "preview" command, which will want a valid (S3) URL for diff, even though it won't be used digest = calcDigest(buffer.Bytes()) @@ -241,7 +242,7 @@ func getRemoteBuildContext(ctx context.Context, provider client.Provider, projec panic("unexpected UploadMode value") } - term.Info("Uploading the project files for", service) + slog.Info(fmt.Sprintln("Uploading the project files for", service)) return uploadArchive(ctx, provider, projectName, buffer, archiveType, digest) } @@ -297,7 +298,7 @@ func tryReadIgnoreFile(cwd, ignorefile string) io.ReadCloser { if err != nil { return nil } - term.Debug("Reading .dockerignore file from", ignorefile) + slog.Debug(fmt.Sprintln("Reading .dockerignore file from", ignorefile)) return reader } @@ -306,7 +307,7 @@ func tryReadIgnoreFile(cwd, ignorefile string) io.ReadCloser { // Returns the filename of the written file and an error. 
func writeDefaultIgnoreFile(cwd string, dockerignore string) (string, error) { path := filepath.Join(cwd, dockerignore) - term.Debug("Writing .dockerignore file to", path) + slog.Debug(fmt.Sprintln("Writing .dockerignore file to", path)) err := os.WriteFile(path, []byte(defaultDockerIgnore), 0644) if err != nil { @@ -369,7 +370,7 @@ func walkContextFolder(root, dockerfile string, writeIgnore writeIgnoreFile, fn if dockerignore == "" && writeIgnore { // Generate a default .dockerignore file if none exists (to be included in the context) - term.Warn("No .dockerignore file found; creating default .dockerignore; you may add this to source control (git)") + slog.Warn("No .dockerignore file found; creating default .dockerignore; you may add this to source control (git)") var err error dockerignore, err = writeDefaultIgnoreFile(root, dotdockerignore) if err != nil { @@ -412,7 +413,7 @@ func walkContextFolder(root, dockerfile string, writeIgnore writeIgnoreFile, fn return err } if ignore { - term.Debug("Ignoring", relPath) // TODO: avoid printing in this function + slog.Debug(fmt.Sprintln("Ignoring", relPath)) // TODO: avoid printing in this function if de.IsDir() { return filepath.SkipDir } @@ -447,7 +448,7 @@ func createArchive(ctx context.Context, root string, dockerfile string, contentT doProgress := term.StdoutCanColor() && term.IsTerminal() err := walkContextFolder(root, dockerfile, writeIgnoreFileYes, func(path string, de os.DirEntry, slashPath string) error { if term.DoDebug() { - term.Debug("Adding", slashPath) + slog.Debug(fmt.Sprintln("Adding", slashPath)) } else if doProgress { term.Printf("%4d %s\r", fileCount, slashPath) defer term.ClearLine() @@ -474,7 +475,7 @@ func createArchive(ctx context.Context, root string, dockerfile string, contentT fileCount++ if fileCount == ContextFileLimit+1 { - term.Warnf("the build context contains more than %d files; use --debug or create .dockerignore to exclude caches and build artifacts", ContextFileLimit) + 
slog.Warn(fmt.Sprintf("the build context contains more than %d files; use --debug or create .dockerignore to exclude caches and build artifacts", ContextFileLimit)) } bufLen := buf.Len() @@ -483,7 +484,7 @@ func createArchive(ctx context.Context, root string, dockerfile string, contentT return fmt.Errorf("the build context is limited to %s; consider downloading large files in the Dockerfile or set the DEFANG_BUILD_CONTEXT_LIMIT environment variable", units.BytesSize(float64(ContextSizeHardLimit))) } if bufLen <= ContextSizeSoftLimit && buf.Len() > ContextSizeSoftLimit { - term.Warnf("the build context is larger than %s; use --debug or create .dockerignore to exclude caches and build artifacts", units.BytesSize(float64(buf.Len()))) + slog.Warn(fmt.Sprintf("the build context is larger than %s; use --debug or create .dockerignore to exclude caches and build artifacts", units.BytesSize(float64(buf.Len())))) } return err }) diff --git a/src/pkg/cli/compose/dockerfile_validator.go b/src/pkg/cli/compose/dockerfile_validator.go index 8f55ab967..f21e4f44e 100644 --- a/src/pkg/cli/compose/dockerfile_validator.go +++ b/src/pkg/cli/compose/dockerfile_validator.go @@ -3,11 +3,11 @@ package compose import ( "bytes" "fmt" + "log/slog" "os" "path/filepath" "strings" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/moby/buildkit/frontend/dockerfile/parser" ) @@ -35,7 +35,7 @@ func (e *DockerfileValidationError) Unwrap() error { // ValidateDockerfile validates the syntax and basic structure of a Dockerfile func ValidateDockerfile(dockerfilePath string, serviceName string) error { - term.Debugf("Validating Dockerfile: %s for service %q", dockerfilePath, serviceName) + slog.Debug(fmt.Sprintf("Validating Dockerfile: %s for service %q", dockerfilePath, serviceName)) // Read the Dockerfile content, err := os.ReadFile(dockerfilePath) @@ -124,7 +124,7 @@ func ValidateDockerfile(dockerfilePath string, serviceName string) error { } } // Log warnings but don't fail validation - 
term.Warnf("service %q: Dockerfile %q has warnings:\n %s", serviceName, dockerfilePath, strings.Join(warnings, "\n ")) + slog.Warn(fmt.Sprintf("service %q: Dockerfile %q has warnings:\n %s", serviceName, dockerfilePath, strings.Join(warnings, "\n "))) } return nil @@ -161,7 +161,7 @@ func ValidateServiceDockerfiles(project *Project) error { if os.IsNotExist(err) { // This might be handled later by Railpack or may be a remote context // Only validate if the file exists - term.Debugf("Skipping validation for service %q: Dockerfile %q does not exist", service.Name, dockerfilePath) + slog.Debug(fmt.Sprintf("Skipping validation for service %q: Dockerfile %q does not exist", service.Name, dockerfilePath)) continue } errors = append(errors, &DockerfileValidationError{ diff --git a/src/pkg/cli/compose/fixup.go b/src/pkg/cli/compose/fixup.go index 615cae441..67a6f8b43 100644 --- a/src/pkg/cli/compose/fixup.go +++ b/src/pkg/cli/compose/fixup.go @@ -3,6 +3,7 @@ package compose import ( "context" "fmt" + "log/slog" "os" "path/filepath" "slices" @@ -11,7 +12,6 @@ import ( "github.com/DefangLabs/defang/src/pkg" "github.com/DefangLabs/defang/src/pkg/cli/client" - "github.com/DefangLabs/defang/src/pkg/term" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" composeTypes "github.com/compose-spec/compose-go/v2/types" ) @@ -32,14 +32,14 @@ func FixupServices(ctx context.Context, provider client.Provider, project *compo // Preload the current config so we can detect which environment variables should be passed as "secrets" config, err := provider.ListConfig(ctx, &defangv1.ListConfigsRequest{Project: project.Name}) if err != nil { - term.Debugf("failed to load config: %v", err) + slog.Debug(fmt.Sprintf("failed to load config: %v", err)) config = &defangv1.Secrets{} } slices.Sort(config.Names) // sort for binary search accountInfo, err := provider.AccountInfo(ctx) if err != nil { - term.Debugf("failed to get account info to fixup services: %v", err) + 
slog.Debug(fmt.Sprintf("failed to get account info to fixup services: %v", err)) accountInfo = &client.AccountInfo{} } @@ -83,7 +83,7 @@ func FixupServices(ctx context.Context, provider client.Provider, project *compo // Ignore "build" config if we have "image", unless in --build or --force mode if svccfg.Image != "" && svccfg.Build != nil && upload != UploadModeDigest && upload != UploadModeForce { - term.Warnf("service %q: using published image instead of rebuilding; pass --build to build and publish a new image", svccfg.Name) + slog.Warn(fmt.Sprintf("service %q: using published image instead of rebuilding; pass --build to build and publish a new image", svccfg.Name)) svccfg.Build = nil } @@ -107,7 +107,7 @@ func FixupServices(ctx context.Context, provider client.Provider, project *compo // Check if the dockerfile exists dockerfilePath := filepath.Join(svccfg.Build.Context, svccfg.Build.Dockerfile) if _, err := os.Stat(dockerfilePath); err != nil { - term.Debugf("stat %q: %v", dockerfilePath, err) + slog.Debug(fmt.Sprintf("stat %q: %v", dockerfilePath, err)) // In this case we know that the dockerfile is not in the location the compose file specifies, // so can assume that the dockerfile has been normalized to the default "Dockerfile". 
if svccfg.Build.Dockerfile != "Dockerfile" { @@ -155,14 +155,14 @@ func FixupServices(ctx context.Context, provider client.Provider, project *compo } if len(removedArgs) > 0 { - term.Warnf("service %q: skipping unset build argument %q", svccfg.Name, removedArgs) + slog.Warn(fmt.Sprintf("service %q: skipping unset build argument %q", svccfg.Name, removedArgs)) } } // Fixup secret references; secrets are supposed to be files, not env, but it's kept for backward compatibility for i, secret := range svccfg.Secrets { if i == 0 { // only warn once - term.Warnf("service %q: secrets will be exposed as environment variables, not files (use 'environment' instead)", svccfg.Name) + slog.Warn(fmt.Sprintf("service %q: secrets will be exposed as environment variables, not files (use 'environment' instead)", svccfg.Name)) } svccfg.Environment[secret.Source] = nil } @@ -176,7 +176,7 @@ func FixupServices(ctx context.Context, provider client.Provider, project *compo // A bug in Compose-go env file parsing can cause empty keys if key == "" { if !shownOnce { - term.Warnf("service %q: skipping unset environment variable key", svccfg.Name) + slog.Warn(fmt.Sprintf("service %q: skipping unset environment variable key", svccfg.Name)) shownOnce = true } delete(svccfg.Environment, key) // remove the empty key; this is safe @@ -204,17 +204,17 @@ func FixupServices(ctx context.Context, provider client.Provider, project *compo } if len(notAdjusted) > 0 { - term.Warnf("service %q: environment variable(s) %q will use the `defang config` value instead of adjusted service name", svccfg.Name, notAdjusted) + slog.Warn(fmt.Sprintf("service %q: environment variable(s) %q will use the `defang config` value instead of adjusted service name", svccfg.Name, notAdjusted)) } if len(overridden) > 0 { - term.Warnf("service %q: environment variable(s) %q overridden by config", svccfg.Name, overridden) + slog.Warn(fmt.Sprintf("service %q: environment variable(s) %q overridden by config", svccfg.Name, overridden)) 
} _, scaling := svccfg.Extensions["x-defang-autoscaling"] if scaling { if _, ok := provider.(*client.PlaygroundProvider); ok { - term.Warnf("service %q: auto-scaling is not supported in the Playground; consider using BYOC (https://s.defang.io/byoc)", svccfg.Name) + slog.Warn(fmt.Sprintf("service %q: auto-scaling is not supported in the Playground; consider using BYOC (https://s.defang.io/byoc)", svccfg.Name)) } } @@ -252,7 +252,7 @@ func fixupLLM(svccfg *composeTypes.ServiceConfig) { // HACK: we must have at least one host port to get a CNAME for the service // litellm listens on 4000 by default var port uint32 = liteLLMPort - term.Debugf("service %q: adding LLM host port %d", svccfg.Name, port) + slog.Debug(fmt.Sprintf("service %q: adding LLM host port %d", svccfg.Name, port)) svccfg.Ports = []composeTypes.ServicePortConfig{{Target: port, Mode: Mode_HOST, Protocol: Protocol_TCP}} } } @@ -260,7 +260,7 @@ func fixupLLM(svccfg *composeTypes.ServiceConfig) { func fixupPostgresService(svccfg *composeTypes.ServiceConfig, provider client.Provider, upload UploadMode) error { _, managedPostgres := svccfg.Extensions["x-defang-postgres"] if _, ok := provider.(*client.PlaygroundProvider); ok && managedPostgres && upload != UploadModeEstimate { - term.Warnf("service %q: managed postgres is not supported in the Playground; consider using BYOC (https://s.defang.io/byoc)", svccfg.Name) + slog.Warn(fmt.Sprintf("service %q: managed postgres is not supported in the Playground; consider using BYOC (https://s.defang.io/byoc)", svccfg.Name)) } if len(svccfg.Ports) == 0 { // HACK: we must have at least one host port to get a CNAME for the service @@ -273,7 +273,7 @@ func fixupPostgresService(svccfg *composeTypes.ServiceConfig, provider client.Pr return err } } - term.Debugf("service %q: adding postgres host port %d", svccfg.Name, port) + slog.Debug(fmt.Sprintf("service %q: adding postgres host port %d", svccfg.Name, port)) svccfg.Ports = []composeTypes.ServicePortConfig{{Target: port, 
Mode: Mode_HOST, Protocol: Protocol_TCP}} } else { fixupIngressPorts(svccfg) @@ -284,7 +284,7 @@ func fixupPostgresService(svccfg *composeTypes.ServiceConfig, provider client.Pr func fixupMongoService(svccfg *composeTypes.ServiceConfig, provider client.Provider, upload UploadMode) error { _, managedMongo := svccfg.Extensions["x-defang-mongodb"] if _, ok := provider.(*client.PlaygroundProvider); ok && managedMongo && upload != UploadModeEstimate { - term.Warnf("service %q: managed mongodb is not supported in the Playground; consider using BYOC (https://s.defang.io/byoc)", svccfg.Name) + slog.Warn(fmt.Sprintf("service %q: managed mongodb is not supported in the Playground; consider using BYOC (https://s.defang.io/byoc)", svccfg.Name)) } if len(svccfg.Ports) == 0 { // HACK: we must have at least one host port to get a CNAME for the service @@ -311,7 +311,7 @@ func fixupMongoService(svccfg *composeTypes.ServiceConfig, provider client.Provi } break // done } - term.Debugf("service %q: adding mongodb host port %d", svccfg.Name, port) + slog.Debug(fmt.Sprintf("service %q: adding mongodb host port %d", svccfg.Name, port)) svccfg.Ports = []composeTypes.ServicePortConfig{{Target: port, Mode: Mode_HOST, Protocol: Protocol_TCP}} } else { fixupIngressPorts(svccfg) @@ -322,7 +322,7 @@ func fixupMongoService(svccfg *composeTypes.ServiceConfig, provider client.Provi func fixupRedisService(svccfg *composeTypes.ServiceConfig, provider client.Provider, upload UploadMode) error { _, managedRedis := svccfg.Extensions["x-defang-redis"] if _, ok := provider.(*client.PlaygroundProvider); ok && managedRedis && upload != UploadModeEstimate { - term.Warnf("service %q: Managed redis is not supported in the Playground; consider using BYOC (https://s.defang.io/byoc)", svccfg.Name) + slog.Warn(fmt.Sprintf("service %q: Managed redis is not supported in the Playground; consider using BYOC (https://s.defang.io/byoc)", svccfg.Name)) } if len(svccfg.Ports) == 0 { // HACK: we must have at least one 
host port to get a CNAME for the service https://redis.io/docs/latest/operate/oss_and_stack/management/config/ @@ -339,7 +339,7 @@ func fixupRedisService(svccfg *composeTypes.ServiceConfig, provider client.Provi // continue; last one wins } } - term.Debugf("service %q: adding redis host port %d", svccfg.Name, port) + slog.Debug(fmt.Sprintf("service %q: adding redis host port %d", svccfg.Name, port)) svccfg.Ports = []composeTypes.ServicePortConfig{{Target: port, Mode: Mode_HOST, Protocol: Protocol_TCP}} } else { fixupIngressPorts(svccfg) @@ -350,7 +350,7 @@ func fixupRedisService(svccfg *composeTypes.ServiceConfig, provider client.Provi func fixupIngressPorts(svccfg *composeTypes.ServiceConfig) { for i, port := range svccfg.Ports { if port.Mode == Mode_INGRESS || port.Mode == "" { - term.Debugf("service %q: changing port %d to host mode", svccfg.Name, port.Target) + slog.Debug(fmt.Sprintf("service %q: changing port %d to host mode", svccfg.Name, port.Target)) svccfg.Ports[i].Mode = Mode_HOST } } @@ -448,7 +448,7 @@ func configureAccessGateway(svccfg *composeTypes.ServiceConfig, project *compose if openAIKey == "" { openAIKey = *key } else if *key != openAIKey { - term.Errorf("multiple different OPENAI_API_KEY values found in services depending on %q", svccfg.Name) + slog.Error(fmt.Sprintf("multiple different OPENAI_API_KEY values found in services depending on %q", svccfg.Name)) break } } @@ -542,16 +542,16 @@ func GetImageRepo(image string) string { func fixupPort(port composeTypes.ServicePortConfig) composeTypes.ServicePortConfig { switch port.Mode { case "": - term.Warnf("port %d: no 'mode' was specified; defaulting to 'ingress' (add 'mode: ingress' to silence)", port.Target) + slog.Warn(fmt.Sprintf("port %d: no 'mode' was specified; defaulting to 'ingress' (add 'mode: ingress' to silence)", port.Target)) fallthrough case Mode_INGRESS: // This code is unnecessarily complex because compose-go silently converts short `ports:` syntax to ingress+tcp if port.Protocol 
== Protocol_UDP { - term.Warnf("port %d: UDP ports default to 'host' mode (add 'mode: host' to silence)", port.Target) + slog.Warn(fmt.Sprintf("port %d: UDP ports default to 'host' mode (add 'mode: host' to silence)", port.Target)) port.Mode = Mode_HOST } else { if port.Published != "" { - term.Debugf("port %d: ignoring 'published: %s' in 'ingress' mode", port.Target, port.Published) + slog.Debug(fmt.Sprintf("port %d: ignoring 'published: %s' in 'ingress' mode", port.Target, port.Published)) } if port.AppProtocol == "" { // TCP ingress is not supported; assuming HTTP (add 'app_protocol: http' to silence)" diff --git a/src/pkg/cli/compose/loader.go b/src/pkg/cli/compose/loader.go index 4c0aba535..210e6f079 100644 --- a/src/pkg/cli/compose/loader.go +++ b/src/pkg/cli/compose/loader.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "os" "path/filepath" "regexp" @@ -177,16 +178,16 @@ func (l *Loader) newProjectOptions(suppressWarn bool) (*cli.ProjectOptions, erro if hasSubstitution(templ, key) { // We don't (yet) support substitution patterns during deployment if inEnv && !suppressWarn { - term.Warnf("Environment variable %q is ignored; add it to `.env` if needed", key) + slog.Warn(fmt.Sprintf("Environment variable %q is ignored; add it to `.env` if needed", key)) } else { - term.Debugf("Unresolved environment variable %q", key) + slog.Debug(fmt.Sprintf("Unresolved environment variable %q", key)) } return "", false } if inEnv && !suppressWarn { - term.Warnf("Environment variable %q is ignored; add it to `.env` or it may be resolved from config during deployment", key) + slog.Warn(fmt.Sprintf("Environment variable %q is ignored; add it to `.env` or it may be resolved from config during deployment", key)) } else { - term.Debugf("Environment variable %q was not resolved locally. It may be resolved from config during deployment", key) + slog.Debug(fmt.Sprintf("Environment variable %q was not resolved locally. 
It may be resolved from config during deployment", key)) } // Leave unresolved variables as-is for resolution later by CD return "${" + key + "}", true diff --git a/src/pkg/cli/compose/serviceNameReplacer.go b/src/pkg/cli/compose/serviceNameReplacer.go index 0ef4c4c75..d11a893bd 100644 --- a/src/pkg/cli/compose/serviceNameReplacer.go +++ b/src/pkg/cli/compose/serviceNameReplacer.go @@ -2,12 +2,13 @@ package compose import ( "context" + "fmt" + "log/slog" "regexp" "slices" "strings" "github.com/DefangLabs/defang/src/pkg/cli/client" - "github.com/DefangLabs/defang/src/pkg/term" composeTypes "github.com/compose-spec/compose-go/v2/types" ) @@ -29,7 +30,7 @@ type ServiceNameReplacer struct { func NewServiceNameReplacer(ctx context.Context, dnsResolver client.DNSResolver, project *composeTypes.Project) ServiceNameReplacer { var skipPublicReplacement bool if err := dnsResolver.UpdateShardDomain(ctx); err != nil { - term.Debugf("failed to update shard domain: %v", err) + slog.Debug(fmt.Sprintf("failed to update shard domain: %v", err)) skipPublicReplacement = true } // Create a regexp to detect private service names in environment variable and build arg values @@ -74,7 +75,7 @@ func (s *ServiceNameReplacer) replaceServiceNameWithDNS(value string) string { serviceEnd := match[3] serviceName := value[serviceStart:serviceEnd] if s.skipPublicReplacement { - term.Warnf("service %q: reference to public DNS cannot be replaced in %q, use `defang login` and try again", serviceName, value) + slog.Warn(fmt.Sprintf("service %q: reference to public DNS cannot be replaced in %q, use `defang login` and try again", serviceName, value)) } else { return value[:serviceStart] + s.dnsResolver.ServicePublicDNS(NormalizeServiceName(serviceName), s.projectName) + value[serviceEnd:] } @@ -88,9 +89,9 @@ func (s *ServiceNameReplacer) ReplaceServiceNameWithDNS(serviceName string, key, val := s.replaceServiceNameWithDNS(value) if val != value { - term.Debugf("service %q: service name was adjusted: %s 
%q assigned value %q", serviceName, fixupTarget, key, val) + slog.Debug(fmt.Sprintf("service %q: service name was adjusted: %s %q assigned value %q", serviceName, fixupTarget, key, val)) } else if s.publicServiceNames != nil && s.publicServiceNames.MatchString(value) { - term.Debugf("service %q: service name in the %s %q was not adjusted; only references to other services with port mode set to 'host' will be fixed-up", serviceName, fixupTarget, key) + slog.Debug(fmt.Sprintf("service %q: service name in the %s %q was not adjusted; only references to other services with port mode set to 'host' will be fixed-up", serviceName, fixupTarget, key)) } return val diff --git a/src/pkg/cli/compose/serviceNameReplacer_test.go b/src/pkg/cli/compose/serviceNameReplacer_test.go index 209f836e3..fc7944be5 100644 --- a/src/pkg/cli/compose/serviceNameReplacer_test.go +++ b/src/pkg/cli/compose/serviceNameReplacer_test.go @@ -3,11 +3,13 @@ package compose import ( "bytes" "context" + "log/slog" "os" "testing" "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/dns" + "github.com/DefangLabs/defang/src/pkg/logs" "github.com/DefangLabs/defang/src/pkg/term" composeTypes "github.com/compose-spec/compose-go/v2/types" ) @@ -121,6 +123,7 @@ func TestServiceNameReplacer(t *testing.T) { prevTerm := term.DefaultTerm var out, err bytes.Buffer term.DefaultTerm = term.NewTerm(os.Stdin, &out, &err) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) t.Cleanup(func() { term.DefaultTerm = prevTerm }) diff --git a/src/pkg/cli/compose/validation.go b/src/pkg/cli/compose/validation.go index b8f4e35ce..522b17e22 100644 --- a/src/pkg/cli/compose/validation.go +++ b/src/pkg/cli/compose/validation.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "math" "path/filepath" "regexp" @@ -16,7 +17,6 @@ import ( "github.com/DefangLabs/defang/src/pkg" "github.com/DefangLabs/defang/src/pkg/clouds/gcp" "github.com/DefangLabs/defang/src/pkg/modes" - 
"github.com/DefangLabs/defang/src/pkg/term" composeTypes "github.com/compose-spec/compose-go/v2/types" ) @@ -59,16 +59,16 @@ func ValidateProject(project *composeTypes.Project, mode modes.Mode) error { func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.Project, mode modes.Mode) error { if svccfg.ReadOnly { - term.Debugf("service %q: unsupported compose directive: read_only", svccfg.Name) + slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: read_only", svccfg.Name)) } if svccfg.Restart == "" { // This was a warning, but we don't really care and want to reduce the noise - term.Debugf("service %q: missing compose directive: restart; assuming 'unless-stopped' (add 'restart' to silence)", svccfg.Name) + slog.Debug(fmt.Sprintf("service %q: missing compose directive: restart; assuming 'unless-stopped' (add 'restart' to silence)", svccfg.Name)) } else if svccfg.Restart != "always" && svccfg.Restart != "unless-stopped" { - term.Debugf("service %q: unsupported compose directive: restart; assuming 'unless-stopped' (add 'restart' to silence)", svccfg.Name) + slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: restart; assuming 'unless-stopped' (add 'restart' to silence)", svccfg.Name)) } if svccfg.ContainerName != "" { - term.Debugf("service %q: unsupported compose directive: container_name", svccfg.Name) + slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: container_name", svccfg.Name)) } if svccfg.Hostname != "" { return fmt.Errorf("service %q: unsupported compose directive: hostname; consider using 'domainname' instead", svccfg.Name) @@ -77,7 +77,7 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P return fmt.Errorf("service %q: unsupported compose directive: dns_search", svccfg.Name) } if len(svccfg.DNSOpts) != 0 { - term.Debugf("service %q: unsupported compose directive: dns_opt", svccfg.Name) + slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: 
dns_opt", svccfg.Name)) } if len(svccfg.DNS) != 0 { return fmt.Errorf("service %q: unsupported compose directive: dns", svccfg.Name) @@ -95,37 +95,37 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P return fmt.Errorf("service %q: unsupported compose directive: group_add", svccfg.Name) } if len(svccfg.Ipc) > 0 { - term.Debugf("service %q: unsupported compose directive: ipc", svccfg.Name) + slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: ipc", svccfg.Name)) } if len(svccfg.Uts) > 0 { - term.Debugf("service %q: unsupported compose directive: uts", svccfg.Name) + slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: uts", svccfg.Name)) } if svccfg.Isolation != "" { - term.Debugf("service %q: unsupported compose directive: isolation", svccfg.Name) + slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: isolation", svccfg.Name)) } if svccfg.MacAddress != "" { - term.Debugf("service %q: unsupported compose directive: mac_address", svccfg.Name) + slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: mac_address", svccfg.Name)) } if len(svccfg.Labels) > 0 { - term.Debugf("service %q: unsupported compose directive: labels", svccfg.Name) // TODO: add support for labels + slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: labels", svccfg.Name)) // TODO: add support for labels } if len(svccfg.Links) > 0 { - term.Debugf("service %q: unsupported compose directive: links", svccfg.Name) + slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: links", svccfg.Name)) } if svccfg.Logging != nil { - term.Debugf("service %q: unsupported compose directive: logging", svccfg.Name) + slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: logging", svccfg.Name)) } for name := range svccfg.Networks { if _, ok := project.Networks[name]; !ok { // This was a warning, but we don't really care and want to reduce the noise - term.Debugf("service %q: network %q is not 
defined in the top-level networks section", svccfg.Name, name) + slog.Debug(fmt.Sprintf("service %q: network %q is not defined in the top-level networks section", svccfg.Name, name)) } } if len(svccfg.Volumes) > 0 { - term.Warnf("service %q: unsupported compose directive: volumes", svccfg.Name) // TODO: add support for volumes + slog.Warn(fmt.Sprintf("service %q: unsupported compose directive: volumes", svccfg.Name)) // TODO: add support for volumes } if len(svccfg.VolumesFrom) > 0 { - term.Warnf("service %q: unsupported compose directive: volumes_from", svccfg.Name) // TODO: add support for volumes_from + slog.Warn(fmt.Sprintf("service %q: unsupported compose directive: volumes_from", svccfg.Name)) // TODO: add support for volumes_from } if svccfg.Build != nil { _, err := filepath.Abs(svccfg.Build.Context) @@ -144,22 +144,22 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P return fmt.Errorf("service %q: unsupported compose directive: build ssh", svccfg.Name) } if len(svccfg.Build.Labels) != 0 { - term.Debugf("service %q: unsupported compose directive: build labels", svccfg.Name) // TODO: add support for Kaniko --label + slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: build labels", svccfg.Name)) // TODO: add support for Kaniko --label } if len(svccfg.Build.CacheFrom) != 0 { - term.Debugf("service %q: unsupported compose directive: build cache_from", svccfg.Name) + slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: build cache_from", svccfg.Name)) } if len(svccfg.Build.CacheTo) != 0 { - term.Debugf("service %q: unsupported compose directive: build cache_to", svccfg.Name) + slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: build cache_to", svccfg.Name)) } if svccfg.Build.NoCache { - term.Debugf("service %q: unsupported compose directive: build no_cache", svccfg.Name) + slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: build no_cache", svccfg.Name)) } if 
len(svccfg.Build.ExtraHosts) != 0 { return fmt.Errorf("service %q: unsupported compose directive: build extra_hosts", svccfg.Name) } if svccfg.Build.Isolation != "" { - term.Debugf("service %q: unsupported compose directive: build isolation", svccfg.Name) + slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: build isolation", svccfg.Name)) } if svccfg.Build.Network != "" { return fmt.Errorf("service %q: unsupported compose directive: build network", svccfg.Name) @@ -183,7 +183,7 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P return fmt.Errorf("service %q: unsupported compose directive: build additional_contexts", svccfg.Name) } if svccfg.Build.Ulimits != nil { - term.Warnf("service %q: unsupported compose directive: build ulimits", svccfg.Name) // TODO: add support for build ulimits + slog.Warn(fmt.Sprintf("service %q: unsupported compose directive: build ulimits", svccfg.Name)) // TODO: add support for build ulimits } } for _, secret := range svccfg.Secrets { @@ -193,11 +193,11 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P // secret.Target will always be automatically constructed by compose-go to "/run/secrets/" if s, ok := project.Secrets[secret.Source]; !ok { // This was a warning, but we don't really care and want to reduce the noise - term.Debugf("secret %q is not defined in the top-level secrets section", secret.Source) + slog.Debug(fmt.Sprintf("secret %q is not defined in the top-level secrets section", secret.Source)) } else if s.Name != "" && s.Name != secret.Source { return fmt.Errorf("unsupported secret %q: cannot override name %q", secret.Source, s.Name) // TODO: support custom secret names } else if !s.External { - term.Warnf("unsupported secret %q: not marked external:true", secret.Source) // TODO: support secrets from environment/file + slog.Warn(fmt.Sprintf("unsupported secret %q: not marked external:true", secret.Source)) // TODO: support secrets from 
environment/file } } @@ -212,8 +212,8 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P // show warning if sensitive information is detected if isSecret { - term.Warnf("service %q: environment %q may contain sensitive information; consider using 'defang config set %s' to securely store this value", svccfg.Name, key, key) - term.Debugf("service %q: environment %q may contain detected secrets of type: %v", svccfg.Name, key, ds) + slog.Warn(fmt.Sprintf("service %q: environment %q may contain sensitive information; consider using 'defang config set %s' to securely store this value", svccfg.Name, key, key)) + slog.Debug(fmt.Sprintf("service %q: environment %q may contain detected secrets of type: %v", svccfg.Name, key, ds)) } } } @@ -226,7 +226,7 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P // Show a warning when we have ingress ports but no explicit healthcheck for _, port := range svccfg.Ports { if port.Mode == Mode_INGRESS { - term.Warnf("service %q: ingress port %d without healthcheck; defaults to GET / HTTP/1.1", svccfg.Name, port.Target) + slog.Warn(fmt.Sprintf("service %q: ingress port %d without healthcheck; defaults to GET / HTTP/1.1", svccfg.Name, port.Target)) break } } @@ -235,14 +235,14 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P if svccfg.HealthCheck.Timeout != nil { timeout = time.Duration(*svccfg.HealthCheck.Timeout).Seconds() if _, frac := math.Modf(timeout); frac != 0 { - term.Warnf("service %q: healthcheck timeout must be a multiple of 1s", svccfg.Name) + slog.Warn(fmt.Sprintf("service %q: healthcheck timeout must be a multiple of 1s", svccfg.Name)) } } interval := 30.0 // default per compose spec if svccfg.HealthCheck.Interval != nil { interval = time.Duration(*svccfg.HealthCheck.Interval).Seconds() if _, frac := math.Modf(interval); frac != 0 { - term.Warnf("service %q: healthcheck interval must be a multiple of 1s", svccfg.Name) + 
slog.Warn(fmt.Sprintf("service %q: healthcheck interval must be a multiple of 1s", svccfg.Name)) } } // Technically this should test for <= but both interval and timeout have 30s as the default value @@ -250,10 +250,10 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P return fmt.Errorf("service %q: healthcheck timeout %fs must be positive and smaller than the interval %fs", svccfg.Name, timeout, interval) } if svccfg.HealthCheck.StartPeriod != nil { - term.Debugf("service %q: unsupported compose directive: healthcheck start_period", svccfg.Name) + slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: healthcheck start_period", svccfg.Name)) } if svccfg.HealthCheck.StartInterval != nil { - term.Debugf("service %q: unsupported compose directive: healthcheck start_interval", svccfg.Name) + slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: healthcheck start_interval", svccfg.Name)) } } var replicas int @@ -275,29 +275,29 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P return fmt.Errorf("service %q: unsupported compose directive: deploy endpoint_mode", svccfg.Name) } if svccfg.Deploy.Resources.Limits != nil && svccfg.Deploy.Resources.Reservations == nil { - term.Debugf("service %q: no reservations specified; using limits as reservations", svccfg.Name) + slog.Debug(fmt.Sprintf("service %q: no reservations specified; using limits as reservations", svccfg.Name)) } reservations = getResourceReservations(svccfg.Deploy.Resources) if reservations != nil && reservations.NanoCPUs < 0 { // "0" just means "as small as possible" return fmt.Errorf("service %q: invalid value for cpus: %v", svccfg.Name, reservations.NanoCPUs) } if len(svccfg.Deploy.Labels) > 0 { - term.Debugf("service %q: unsupported compose directive: deploy labels", svccfg.Name) + slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: deploy labels", svccfg.Name)) } if 
len(svccfg.Deploy.Placement.Constraints) != 0 || len(svccfg.Deploy.Placement.Preferences) != 0 || svccfg.Deploy.Placement.MaxReplicas != 0 { - term.Debugf("service %q: unsupported compose directive: deploy placement", svccfg.Name) + slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: deploy placement", svccfg.Name)) } if svccfg.Deploy.Replicas != nil { replicas = *svccfg.Deploy.Replicas } } if mode == modes.ModeHighAvailability && replicas < 2 && svccfg.Extensions["x-defang-autoscaling"] == nil { - term.Warnf("service %q: high-availability mode requires at least 2 replicas or x-defang-autoscaling", svccfg.Name) + slog.Warn(fmt.Sprintf("service %q: high-availability mode requires at least 2 replicas or x-defang-autoscaling", svccfg.Name)) } if reservations == nil || reservations.MemoryBytes == 0 { // Don't show this warning for managed pseudo-services like CDN if svccfg.Extensions["x-defang-static-files"] == nil { - term.Warnf("service %q: missing memory reservation; using provider-specific defaults. Specify deploy.resources.reservations.memory to avoid out-of-memory errors", svccfg.Name) + slog.Warn(fmt.Sprintf("service %q: missing memory reservation; using provider-specific defaults. 
Specify deploy.resources.reservations.memory to avoid out-of-memory errors", svccfg.Name)) } } @@ -321,7 +321,7 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P if managedRedis { // Ensure the repo is a valid Redis repo if !IsRedisRepo(repo) { - term.Warnf("service %q: managed Redis service should use a redis or valkey image", svccfg.Name) + slog.Warn(fmt.Sprintf("service %q: managed Redis service should use a redis or valkey image", svccfg.Name)) } if _, err = validateManagedStore(redisExtension); err != nil { return fmt.Errorf("service %q: %w", svccfg.Name, err) @@ -332,7 +332,7 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P if managedPostgres { // Ensure the repo is a valid Postgres repo if !IsPostgresRepo(repo) { - term.Warnf("service %q: managed Postgres service should use a postgres image", svccfg.Name) + slog.Warn(fmt.Sprintf("service %q: managed Postgres service should use a postgres image", svccfg.Name)) } if _, err = validateManagedStore(postgresExtension); err != nil { return fmt.Errorf("service %q: %w", svccfg.Name, err) @@ -343,7 +343,7 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P if managedMongodb { // Ensure the repo is a valid MongoDB repo if !IsMongoRepo(repo) { - term.Warnf("service %q: managed MongoDB service should use a mongo image", svccfg.Name) + slog.Warn(fmt.Sprintf("service %q: managed MongoDB service should use a mongo image", svccfg.Name)) } if _, err = validateManagedStore(mongodbExtension); err != nil { return fmt.Errorf("service %q: %w", svccfg.Name, err) @@ -351,7 +351,7 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P } if !managedRedis && !managedPostgres && !managedMongodb && isStatefulImage(svccfg.Image) { - term.Warnf("service %q: stateful service will lose data on restart; use a managed service instead", svccfg.Name) + slog.Warn(fmt.Sprintf("service %q: stateful service will lose data 
on restart; use a managed service instead", svccfg.Name)) } for k := range svccfg.Extensions { @@ -365,7 +365,7 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P "x-defang-autoscaling": continue default: - term.Warnf("service %q: unsupported compose extension: %q", svccfg.Name, k) + slog.Warn(fmt.Sprintf("service %q: unsupported compose extension: %q", svccfg.Name, k)) } } @@ -401,19 +401,19 @@ func validatePort(port composeTypes.ServicePortConfig) error { portRange := strings.SplitN(port.Published, "-", 2) start, err := strconv.ParseUint(portRange[0], 10, 16) if err != nil { - term.Warnf("port %d: 'published' range start should be an integer; ignoring 'published: %v'", port.Target, portRange[0]) + slog.Warn(fmt.Sprintf("port %d: 'published' range start should be an integer; ignoring 'published: %v'", port.Target, portRange[0])) } else if len(portRange) == 2 { end, err := strconv.ParseUint(portRange[1], 10, 16) if err != nil { - term.Warnf("port %d: 'published' range end should be an integer; ignoring 'published: %v'", port.Target, portRange[1]) + slog.Warn(fmt.Sprintf("port %d: 'published' range end should be an integer; ignoring 'published: %v'", port.Target, portRange[1])) } else if start > end { - term.Warnf("port %d: 'published' range start should be less than end; ignoring 'published: %v'", port.Target, port.Published) + slog.Warn(fmt.Sprintf("port %d: 'published' range start should be less than end; ignoring 'published: %v'", port.Target, port.Published)) } else if port.Target < uint32(start) || port.Target > uint32(end) { - term.Warnf("port %d: 'published' range should include 'target'; ignoring 'published: %v'", port.Target, port.Published) + slog.Warn(fmt.Sprintf("port %d: 'published' range should include 'target'; ignoring 'published: %v'", port.Target, port.Published)) } } else { if start != uint64(port.Target) { - term.Warnf("port %d: 'published' should be equal to 'target'; ignoring 'published: %v'", port.Target, 
port.Published) + slog.Warn(fmt.Sprintf("port %d: 'published' should be equal to 'target'; ignoring 'published: %v'", port.Target, port.Published)) } } } diff --git a/src/pkg/cli/compose/validation_test.go b/src/pkg/cli/compose/validation_test.go index cd58976f3..f6c3d2f8f 100644 --- a/src/pkg/cli/compose/validation_test.go +++ b/src/pkg/cli/compose/validation_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "errors" + "log/slog" "os" "slices" "strings" @@ -11,6 +12,7 @@ import ( "github.com/DefangLabs/defang/src/pkg" "github.com/DefangLabs/defang/src/pkg/cli/client" + "github.com/DefangLabs/defang/src/pkg/logs" "github.com/DefangLabs/defang/src/pkg/modes" "github.com/DefangLabs/defang/src/pkg/term" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" @@ -37,8 +39,9 @@ func TestValidationAndConvert(t *testing.T) { } testAllComposeFiles(t, func(t *testing.T, name, path string) { - logs := new(bytes.Buffer) - term.DefaultTerm = term.NewTerm(os.Stdin, logs, logs) + logBuf := new(bytes.Buffer) + term.DefaultTerm = term.NewTerm(os.Stdin, logBuf, logBuf) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) options := LoaderOptions{ConfigPaths: []string{path}} loader := Loader{options: options} @@ -53,7 +56,7 @@ func TestValidationAndConvert(t *testing.T) { if err := FixupServices(t.Context(), mockClient, project, UploadModeIgnore); err != nil { t.Logf("Service conversion failed: %v", err) - logs.WriteString("Error: " + err.Error() + "\n") // no coverage! + logBuf.WriteString("Error: " + err.Error() + "\n") // no coverage! 
} listConfigNames, err := listConfigNamesFunc(t.Context()) @@ -62,7 +65,7 @@ func TestValidationAndConvert(t *testing.T) { } if err := ValidateProjectConfig(project, listConfigNames); err != nil { t.Logf("Project config validation failed: %v", err) - logs.WriteString("Error: " + err.Error() + "\n") + logBuf.WriteString("Error: " + err.Error() + "\n") } mode := modes.ModeAffordable @@ -71,16 +74,16 @@ func TestValidationAndConvert(t *testing.T) { } if err := ValidateProject(project, mode); err != nil { t.Logf("Project validation failed: %v", err) - logs.WriteString("Error: " + err.Error() + "\n") // no coverage! + logBuf.WriteString("Error: " + err.Error() + "\n") // no coverage! } // The order of the services is not guaranteed, so we sort the logs before comparing - logLines := strings.SplitAfter(logs.String(), "\n") + logLines := strings.SplitAfter(logBuf.String(), "\n") slices.Sort(logLines) - logs = bytes.NewBufferString(strings.Join(logLines, "")) + logBuf = bytes.NewBufferString(strings.Join(logLines, "")) // Compare the logs with the warnings file - if err := pkg.Compare(logs.Bytes(), path+".warnings"); err != nil { + if err := pkg.Compare(logBuf.Bytes(), path+".warnings"); err != nil { t.Error(err) } }) diff --git a/src/pkg/cli/composeDown.go b/src/pkg/cli/composeDown.go index 4e9f869a4..e4ab2ffbd 100644 --- a/src/pkg/cli/composeDown.go +++ b/src/pkg/cli/composeDown.go @@ -3,6 +3,8 @@ package cli import ( "context" "errors" + "fmt" + "log/slog" "github.com/AlecAivazis/survey/v2" "github.com/DefangLabs/defang/src/pkg/cli/client" @@ -12,7 +14,7 @@ import ( ) func ComposeDown(ctx context.Context, projectName string, fabric client.FabricClient, provider client.Provider) (types.ETag, error) { - term.Debugf("Destroying project %q", projectName) + slog.Debug(fmt.Sprintf("Destroying project %q", projectName)) // If no names are provided, destroy the entire project return CdCommand(ctx, projectName, provider, fabric, client.CdCommandDestroy) @@ -33,6 +35,6 @@ func 
InteractiveComposeDown(ctx context.Context, projectName string, fabric clie return "", ErrDoNotComposeDown } - term.Info("Deactivating project " + projectName) + slog.Info("Deactivating project " + projectName) return ComposeDown(ctx, projectName, fabric, provider) } diff --git a/src/pkg/cli/composeUp.go b/src/pkg/cli/composeUp.go index 0489f8b5d..183c2a3c6 100644 --- a/src/pkg/cli/composeUp.go +++ b/src/pkg/cli/composeUp.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/cli/compose" @@ -44,7 +45,7 @@ func checkDeploymentMode(prevMode, newMode modes.Mode) (modes.Mode, error) { switch newMode { case modes.ModeUnspecified: if prevMode != modes.ModeUnspecified { - term.Debug("No deployment mode specified; using previous deployment mode:", prevMode) + slog.Debug(fmt.Sprintln("No deployment mode specified; using previous deployment mode:", prevMode)) newMode = prevMode } case modes.ModeAffordable: @@ -52,11 +53,11 @@ func checkDeploymentMode(prevMode, newMode modes.Mode) (modes.Mode, error) { case modes.ModeHighAvailability: return newMode, fmt.Errorf("will not downgrade deployment mode from %s to %s; use %s", prevMode, newMode, modes.ModeBalanced) case modes.ModeBalanced: - term.Warnf("Downgrading deployment mode from %s to %s", prevMode, newMode) + slog.Warn(fmt.Sprintf("Downgrading deployment mode from %s to %s", prevMode, newMode)) } case modes.ModeBalanced: if prevMode == modes.ModeHighAvailability { - term.Warnf("Downgrading deployment mode from %s to %s", prevMode, newMode) + slog.Warn(fmt.Sprintf("Downgrading deployment mode from %s to %s", prevMode, newMode)) } case modes.ModeHighAvailability: // from anything to high-availability is allowed @@ -119,7 +120,7 @@ func ComposeUp(ctx context.Context, fabric client.FabricClient, provider client. 
Stack: provider.GetStackNameForDomain(), }) if err != nil { - term.Debug("GetDelegateSubdomainZone failed:", err) + slog.Debug(fmt.Sprintln("GetDelegateSubdomainZone failed:", err)) return nil, project, errors.New("failed to get delegate domain") } @@ -208,8 +209,8 @@ func ComposeUp(ctx context.Context, fabric client.FabricClient, provider client. CdId: resp.CdId, }) if err != nil { - term.Debug("Failed to record deployment:", err) - term.Warn("Unable to update deployment history; deployment will proceed anyway.") + slog.Debug(fmt.Sprintln("Failed to record deployment:", err)) + slog.Warn("Unable to update deployment history; deployment will proceed anyway.") } if term.DoDebug() { diff --git a/src/pkg/cli/configDelete.go b/src/pkg/cli/configDelete.go index 441e884a9..29cddef10 100644 --- a/src/pkg/cli/configDelete.go +++ b/src/pkg/cli/configDelete.go @@ -2,15 +2,16 @@ package cli import ( "context" + "fmt" + "log/slog" "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/dryrun" - "github.com/DefangLabs/defang/src/pkg/term" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" ) func ConfigDelete(ctx context.Context, projectName string, provider client.Provider, names ...string) error { - term.Debugf("Deleting config %v in project %q", names, projectName) + slog.Debug(fmt.Sprintf("Deleting config %v in project %q", names, projectName)) if dryrun.DoDryRun { return dryrun.ErrDryRun diff --git a/src/pkg/cli/configList.go b/src/pkg/cli/configList.go index 334e53c15..e3bffc248 100644 --- a/src/pkg/cli/configList.go +++ b/src/pkg/cli/configList.go @@ -2,6 +2,8 @@ package cli import ( "context" + "fmt" + "log/slog" "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/term" @@ -13,7 +15,7 @@ type PrintConfig struct { } func ConfigList(ctx context.Context, projectName string, provider client.Provider) error { - term.Debugf("Listing config in project %q", projectName) + 
slog.Debug(fmt.Sprintf("Listing config in project %q", projectName)) config, err := provider.ListConfig(ctx, &defangv1.ListConfigsRequest{Project: projectName}) if err != nil { @@ -22,8 +24,8 @@ func ConfigList(ctx context.Context, projectName string, provider client.Provide numConfigs := len(config.Names) if numConfigs == 0 { - _, err := term.Warn("No configs found") - return err + slog.Warn("No configs found") + return nil } configNames := make([]PrintConfig, numConfigs) diff --git a/src/pkg/cli/configList_test.go b/src/pkg/cli/configList_test.go index e22da4515..4a66cf35c 100644 --- a/src/pkg/cli/configList_test.go +++ b/src/pkg/cli/configList_test.go @@ -2,12 +2,14 @@ package cli import ( "context" + "log/slog" "net/http/httptest" "strings" "testing" "connectrpc.com/connect" "github.com/DefangLabs/defang/src/pkg/cli/client" + "github.com/DefangLabs/defang/src/pkg/logs" "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/types" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" @@ -53,6 +55,7 @@ func TestConfigList(t *testing.T) { t.Run("no configs", func(t *testing.T) { stdout, _ := term.SetupTestTerm(t) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) err := ConfigList(ctx, "emptyconfigs", &provider) if err != nil { @@ -69,6 +72,7 @@ func TestConfigList(t *testing.T) { t.Run("some configs", func(t *testing.T) { stdout, _ := term.SetupTestTerm(t) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) err := ConfigList(ctx, "test", &provider) if err != nil { diff --git a/src/pkg/cli/configResolution.go b/src/pkg/cli/configResolution.go index a5ddd5a9e..482e046e6 100644 --- a/src/pkg/cli/configResolution.go +++ b/src/pkg/cli/configResolution.go @@ -2,6 +2,7 @@ package cli import ( "context" + "log/slog" "slices" "strings" @@ -112,7 +113,7 @@ func printConfigResolutionSummary(project *types.Project, defangConfig []string, projectEnvVars = slices.Compact(projectEnvVars) - term.Info("Service environment variables 
resolution summary:") + slog.Info("Service environment variables resolution summary:") return term.Table(projectEnvVars, "Service", "Environment", "Source", "Value") } diff --git a/src/pkg/cli/configResolution_test.go b/src/pkg/cli/configResolution_test.go index 927cfc60b..849d8d37d 100644 --- a/src/pkg/cli/configResolution_test.go +++ b/src/pkg/cli/configResolution_test.go @@ -1,6 +1,7 @@ package cli import ( + "log/slog" "os" "path/filepath" "regexp" @@ -8,12 +9,14 @@ import ( "github.com/DefangLabs/defang/src/pkg" "github.com/DefangLabs/defang/src/pkg/cli/compose" + "github.com/DefangLabs/defang/src/pkg/logs" "github.com/DefangLabs/defang/src/pkg/term" ) func TestPrintConfigResolutionSummary(t *testing.T) { testAllConfigResolutionFiles(t, "testdata/config-resolution", func(t *testing.T, name, path string) { stdout, _ := term.SetupTestTerm(t) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) loader := compose.NewLoader(compose.WithPath(path)) proj, err := loader.LoadProject(t.Context()) @@ -53,6 +56,7 @@ func TestPrintConfigResolutionSummary(t *testing.T) { func TestPrintRedactedConfigResolutionSummary(t *testing.T) { testAllConfigResolutionFiles(t, "testdata/redact-config", func(t *testing.T, name, path string) { stdout, _ := term.SetupTestTerm(t) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) loader := compose.NewLoader(compose.WithPath(path)) proj, err := loader.LoadProject(t.Context()) diff --git a/src/pkg/cli/configSet.go b/src/pkg/cli/configSet.go index bfa7b2bb2..5e5032035 100644 --- a/src/pkg/cli/configSet.go +++ b/src/pkg/cli/configSet.go @@ -3,10 +3,10 @@ package cli import ( "context" "fmt" + "log/slog" "github.com/DefangLabs/defang/src/pkg" "github.com/DefangLabs/defang/src/pkg/dryrun" - "github.com/DefangLabs/defang/src/pkg/term" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" ) @@ -28,7 +28,7 @@ type ConfigManager interface { } func ConfigSet(ctx context.Context, projectName string, provider ConfigManager, name string, 
value string, options ConfigSetOptions) (bool, error) { - term.Debugf("Setting config %q in project %q", name, projectName) + slog.Debug(fmt.Sprintf("Setting config %q in project %q", name, projectName)) if !pkg.IsValidSecretName(name) { return false, ErrInvalidConfigName{Name: name} diff --git a/src/pkg/cli/connect.go b/src/pkg/cli/connect.go index e46ebcf77..46182ef34 100644 --- a/src/pkg/cli/connect.go +++ b/src/pkg/cli/connect.go @@ -2,19 +2,20 @@ package cli import ( "context" + "fmt" + "log/slog" "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/cli/client/byoc/aws" "github.com/DefangLabs/defang/src/pkg/cli/client/byoc/do" "github.com/DefangLabs/defang/src/pkg/cli/client/byoc/gcp" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/types" ) // Connect builds a client carrying the requested tenant (name or ID). func Connect(fabricAddr string, requestedTenant types.TenantNameOrID) *client.GrpcClient { host := client.NormalizeHost(fabricAddr) - term.Debugf("Using tenant %q for cluster %q", requestedTenant, host) + slog.Debug(fmt.Sprintf("Using tenant %q for cluster %q", requestedTenant, host)) accessToken := client.GetExistingToken(host) return client.NewGrpcClient(host, accessToken, requestedTenant) @@ -25,7 +26,7 @@ func ConnectWithTenant(ctx context.Context, fabricAddr string, requestedTenant t resp, err := grpcClient.WhoAmI(ctx) if err != nil { - term.Debug("Unable to validate tenant with server:", err) + slog.Debug(fmt.Sprintln("Unable to validate tenant with server:", err)) return grpcClient, err } @@ -35,7 +36,7 @@ func ConnectWithTenant(ctx context.Context, fabricAddr string, requestedTenant t func NewProvider(ctx context.Context, providerID client.ProviderID, fabricClient client.FabricClient, stack string) client.Provider { var provider client.Provider - term.Debugf("Creating %s provider", providerID) + slog.Debug(fmt.Sprintf("Creating %s provider", providerID)) switch providerID { 
case client.ProviderAWS: provider = aws.NewByocProvider(ctx, fabricClient.GetTenantName(), stack) diff --git a/src/pkg/cli/deploymentsList.go b/src/pkg/cli/deploymentsList.go index 92f33194e..c5cb23372 100644 --- a/src/pkg/cli/deploymentsList.go +++ b/src/pkg/cli/deploymentsList.go @@ -2,6 +2,8 @@ package cli import ( "context" + "fmt" + "log/slog" "sort" "strings" "time" @@ -46,13 +48,12 @@ func DeploymentsList(ctx context.Context, client client.FabricClient, params Lis if params.ListType == defangv1.DeploymentType_DEPLOYMENT_TYPE_ACTIVE { active = " active" } - var err error if params.ProjectName == "" { - _, err = term.Warnf("No%s deployments found; use --workspace to specify a different workspace", active) + slog.Warn(fmt.Sprintf("No%s deployments found; use --workspace to specify a different workspace", active)) } else { - _, err = term.Warnf("No%s deployments found for project %q", active, params.ProjectName) + slog.Warn(fmt.Sprintf("No%s deployments found for project %q", active, params.ProjectName)) } - return err + return nil } // map to Deployment struct diff --git a/src/pkg/cli/deploymentsList_test.go b/src/pkg/cli/deploymentsList_test.go index b6b646ad3..124ebd8f5 100644 --- a/src/pkg/cli/deploymentsList_test.go +++ b/src/pkg/cli/deploymentsList_test.go @@ -2,12 +2,14 @@ package cli import ( "context" + "log/slog" "net/http/httptest" "strings" "testing" "time" connect "connectrpc.com/connect" + "github.com/DefangLabs/defang/src/pkg/logs" "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/types" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" @@ -62,6 +64,7 @@ func TestDeploymentsList(t *testing.T) { t.Run("no deployments", func(t *testing.T) { stdout, _ := term.SetupTestTerm(t) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) err := DeploymentsList(ctx, grpcClient, ListDeploymentsParams{ ListType: defangv1.DeploymentType_DEPLOYMENT_TYPE_HISTORY, ProjectName: "empty", @@ -81,6 +84,7 @@ func 
TestDeploymentsList(t *testing.T) { t.Run("some deployments", func(t *testing.T) { stdout, _ := term.SetupTestTerm(t) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) err := DeploymentsList(ctx, grpcClient, ListDeploymentsParams{ ListType: defangv1.DeploymentType_DEPLOYMENT_TYPE_HISTORY, ProjectName: "test", @@ -133,6 +137,7 @@ func TestActiveDeployments(t *testing.T) { t.Run("no active deployments", func(t *testing.T) { fabricServer.testDeploymentsData = nil stdout, _ := term.SetupTestTerm(t) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) err := DeploymentsList(ctx, grpcClient, ListDeploymentsParams{ ListType: defangv1.DeploymentType_DEPLOYMENT_TYPE_ACTIVE, @@ -169,6 +174,7 @@ func TestActiveDeployments(t *testing.T) { fabricServer.testDeploymentsData = activeDeployments stdout, _ := term.SetupTestTerm(t) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) err := DeploymentsList(ctx, grpcClient, ListDeploymentsParams{ ListType: defangv1.DeploymentType_DEPLOYMENT_TYPE_ACTIVE, ProjectName: "", diff --git a/src/pkg/cli/estimate.go b/src/pkg/cli/estimate.go index 4ea2ffd8f..3f66f14b1 100644 --- a/src/pkg/cli/estimate.go +++ b/src/pkg/cli/estimate.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "io" + "log/slog" "os" "sort" "strconv" @@ -21,13 +22,13 @@ import ( ) func RunEstimate(ctx context.Context, project *compose.Project, client client.FabricClient, previewProvider client.Provider, estimateProviderID client.ProviderID, region string, mode modes.Mode) (*defangv1.EstimateResponse, error) { - term.Debugf("Running estimate for project %s in region %s with mode %s", project.Name, region, mode) + slog.Debug(fmt.Sprintf("Running estimate for project %s in region %s with mode %s", project.Name, region, mode)) preview, err := GeneratePreview(ctx, project, client, previewProvider, estimateProviderID, mode, region) if err != nil { return nil, err } - term.Info("Preparing estimate") + slog.Info("Preparing estimate") estimate, err := client.Estimate(ctx, 
&defangv1.EstimateRequest{ Provider: estimateProviderID.Value(), @@ -54,7 +55,7 @@ func GeneratePreview(ctx context.Context, project *compose.Project, client clien return "", fmt.Errorf("failed to marshal compose project: %w", err) } - term.Debugf("Fixedup project: %s", string(composeData)) + slog.Debug("Fixedup project: " + string(composeData)) resp, err := client.Preview(ctx, &defangv1.PreviewRequest{ Provider: estimateProviderID.Value(), @@ -67,7 +68,7 @@ func GeneratePreview(ctx context.Context, project *compose.Project, client clien return "", err } - term.Info("Generating deployment preview, this may take a few minutes...") + slog.Info("Generating deployment preview, this may take a few minutes...") var pulumiPreviewLogLines []string tailOptions := TailOptions{ Deployment: resp.Etag, diff --git a/src/pkg/cli/generate.go b/src/pkg/cli/generate.go index 3147ed6ca..d33da10d6 100644 --- a/src/pkg/cli/generate.go +++ b/src/pkg/cli/generate.go @@ -2,6 +2,7 @@ package cli import ( "context" + "log/slog" "os" "path/filepath" @@ -22,7 +23,7 @@ type GenerateArgs struct { func GenerateWithAI(ctx context.Context, client client.FabricClient, args GenerateArgs) ([]string, error) { if dryrun.DoDryRun { - term.Warn("Dry run, no project files will be generated") + slog.Warn("Dry run, no project files will be generated") return nil, dryrun.ErrDryRun } @@ -48,7 +49,7 @@ func GenerateWithAI(ctx context.Context, client client.FabricClient, args Genera } // Write each file to disk - term.Info("Writing files to disk...") + slog.Info("Writing files to disk...") if err := os.MkdirAll(args.Folder, 0755); err != nil { return nil, err } diff --git a/src/pkg/cli/getServices.go b/src/pkg/cli/getServices.go index f962a8acd..a50500b7b 100644 --- a/src/pkg/cli/getServices.go +++ b/src/pkg/cli/getServices.go @@ -3,6 +3,7 @@ package cli import ( "context" "fmt" + "log/slog" "net/http" "net/url" "strings" @@ -51,7 +52,7 @@ func PrintLongServices(ctx context.Context, projectName string, provider 
client. } func GetServices(ctx context.Context, projectName string, provider client.Provider) ([]ServiceLineItem, error) { - term.Debugf("Listing services in project %q", projectName) + slog.Debug(fmt.Sprintf("Listing services in project %q", projectName)) servicesResponse, err := provider.GetServices(ctx, &defangv1.GetServicesRequest{Project: projectName}) if err != nil { @@ -112,7 +113,7 @@ func GetHealthcheckResults(ctx context.Context, serviceInfos []*defangv1.Service defer wg.Done() result, err := RunHealthcheck(ctx, serviceInfo.Service.Name, "https://"+endpoint, serviceInfo.HealthcheckPath) if err != nil { - term.Debugf("Healthcheck error for service %q at endpoint %q: %s", serviceInfo.Service.Name, endpoint, err.Error()) + slog.Debug(fmt.Sprintf("Healthcheck error for service %q at endpoint %q: %s", serviceInfo.Service.Name, endpoint, err.Error())) result = "error" } *results[serviceInfo.Service.Name] = result @@ -135,17 +136,17 @@ func RunHealthcheck(ctx context.Context, name, endpoint, path string) (string, e if err != nil { return "", err } - term.Debugf("[%s] checking health at %s", name, url) + slog.Debug(fmt.Sprintf("[%s] checking health at %s", name, url)) resp, err := http.DefaultClient.Do(req) if err != nil { return "", err } defer resp.Body.Close() if resp.StatusCode >= 200 && resp.StatusCode < 400 { - term.Debugf("[%s] ✔ healthy", name) + slog.Debug(fmt.Sprintf("[%s] ✔ healthy", name)) return "healthy", nil } else { - term.Debugf("[%s] ✘ unhealthy (%s)", name, resp.Status) + slog.Debug(fmt.Sprintf("[%s] ✘ unhealthy (%s)", name, resp.Status)) return "unhealthy (" + resp.Status + ")", nil } } @@ -211,7 +212,7 @@ func PrintServiceStatesAndEndpoints(services []ServiceLineItem) error { } if showCertGenerateHint { - term.Info("Run `defang cert generate` to get a TLS certificate for your service(s)") + slog.Info("Run `defang cert generate` to get a TLS certificate for your service(s)") } return nil diff --git a/src/pkg/cli/getServices_test.go 
b/src/pkg/cli/getServices_test.go index 15e66f5ad..be907ddf5 100644 --- a/src/pkg/cli/getServices_test.go +++ b/src/pkg/cli/getServices_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "errors" + "log/slog" "net/http" "net/http/httptest" "os" @@ -13,6 +14,7 @@ import ( "connectrpc.com/connect" "github.com/DefangLabs/defang/src/pkg/cli/client" + "github.com/DefangLabs/defang/src/pkg/logs" "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/types" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" @@ -268,6 +270,7 @@ func TestPrintServiceStatesAndEndpointsAndDomainname(t *testing.T) { var stdout, stderr bytes.Buffer term.DefaultTerm = term.NewTerm(os.Stdin, &stdout, &stderr) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) tests := []struct { name string diff --git a/src/pkg/cli/install_cd.go b/src/pkg/cli/install_cd.go index 6e2db7392..023cc7d85 100644 --- a/src/pkg/cli/install_cd.go +++ b/src/pkg/cli/install_cd.go @@ -3,16 +3,16 @@ package cli import ( "context" "errors" + "log/slog" "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/dryrun" - "github.com/DefangLabs/defang/src/pkg/term" ) func InstallCD(ctx context.Context, provider client.Provider, force bool) error { if dryrun.DoDryRun { return errors.New("dry run") } - term.Info("Installing the CD resources into the cluster") + slog.Info("Installing the CD resources into the cluster") return provider.SetUpCD(ctx, force) } diff --git a/src/pkg/cli/logout.go b/src/pkg/cli/logout.go index c9fda9a9b..3963c51d7 100644 --- a/src/pkg/cli/logout.go +++ b/src/pkg/cli/logout.go @@ -2,15 +2,16 @@ package cli import ( "context" + "fmt" + "log/slog" "os" "connectrpc.com/connect" "github.com/DefangLabs/defang/src/pkg/cli/client" - "github.com/DefangLabs/defang/src/pkg/term" ) func Logout(ctx context.Context, fabricClient client.FabricClient, fabricAddr string) error { - term.Debug("Logging out") + slog.Debug("Logging out") err := 
fabricClient.RevokeToken(ctx) // Ignore unauthenticated errors, since we're logging out anyway if err != nil && connect.CodeOf(err) != connect.CodeUnauthenticated { @@ -18,7 +19,7 @@ func Logout(ctx context.Context, fabricClient client.FabricClient, fabricAddr st } if err := client.TokenStore.Delete(client.TokenStorageName(fabricAddr)); err != nil { - term.Warn("Failed to remove stored token:", err) + slog.Warn(fmt.Sprintln("Failed to remove stored token:", err)) // Don't return the error - we still consider logout successful } @@ -26,9 +27,9 @@ func Logout(ctx context.Context, fabricClient client.FabricClient, fabricAddr st jwtFile, err := client.GetWebIdentityTokenFile(fabricAddr) if err == nil { if err := os.Remove(jwtFile); err != nil && !os.IsNotExist(err) { - term.Warn("Failed to remove JWT token file:", err) + slog.Warn(fmt.Sprintln("Failed to remove JWT token file:", err)) } else if err == nil { - term.Debug("Removed JWT token file:", jwtFile) + slog.Debug(fmt.Sprintln("Removed JWT token file:", jwtFile)) } } diff --git a/src/pkg/cli/new.go b/src/pkg/cli/new.go index cdc465536..4c069c351 100644 --- a/src/pkg/cli/new.go +++ b/src/pkg/cli/new.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" "io" + "log/slog" "os" "path/filepath" "strings" @@ -36,7 +37,7 @@ func FetchSamples(ctx context.Context) ([]Sample, error) { return nil, err } defer resp.Body.Close() - term.Debug(resp.Header) + slog.Debug(fmt.Sprintf("%v", resp.Header)) reader := resp.Body if resp.Header.Get("Content-Encoding") == "gzip" { reader, err = gzip.NewReader(resp.Body) @@ -69,14 +70,14 @@ func copyFromSamples(ctx context.Context, dir string, names []string, skipExisti return err } defer resp.Body.Close() - term.Debug(resp.Header) + slog.Debug(fmt.Sprintf("%v", resp.Header)) tarball, err := gzip.NewReader(resp.Body) if err != nil { return fmt.Errorf("failed to read tarball: %w", err) } defer tarball.Close() tarReader := tar.NewReader(tarball) - term.Info("Copying files to disk...") + 
slog.Info("Copying files to disk...") sampleFound := false @@ -114,7 +115,7 @@ func copyFromSamples(ctx context.Context, dir string, names []string, skipExisti if !skipExisting || !os.IsExist(err) { return err } - term.Warnf("File already exists, skipping: %q", path) + slog.Warn(fmt.Sprintf("File already exists, skipping: %q", path)) } } } diff --git a/src/pkg/cli/subscribe.go b/src/pkg/cli/subscribe.go index 6b0b95d4a..3fa5796ec 100644 --- a/src/pkg/cli/subscribe.go +++ b/src/pkg/cli/subscribe.go @@ -3,11 +3,12 @@ package cli import ( "context" "errors" + "fmt" "iter" + "log/slog" "connectrpc.com/connect" "github.com/DefangLabs/defang/src/pkg/cli/client" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/types" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" ) @@ -24,7 +25,7 @@ func WaitServiceState( etag types.ETag, services []string, ) (ServiceStates, error) { - term.Debugf("waiting for services %v to reach state %s\n", services, targetState) // TODO: don't print in Go-routine + slog.Debug(fmt.Sprintf("waiting for services %v to reach state %s\n", services, targetState)) // TODO: don't print in Go-routine if len(services) == 0 { return nil, ErrNothingToMonitor @@ -57,9 +58,9 @@ func WaitServiceState( // a minute and DelayBeforeRetry backs off exponentially up to 1 minute). 
if isTransientError(err) { if connect.CodeOf(err) == connect.CodeResourceExhausted { - term.Warnf("quota exceeded; will retry subscribe stream after backoff: %v", err) + slog.Warn(fmt.Sprintf("quota exceeded; will retry subscribe stream after backoff: %v", err)) } else { - term.Debugf("WaitServiceState: transient error, reconnecting subscribe stream: %v", err) + slog.Debug(fmt.Sprintf("WaitServiceState: transient error, reconnecting subscribe stream: %v", err)) } if err := provider.DelayBeforeRetry(ctx); err != nil { return serviceStates, err @@ -82,16 +83,16 @@ func WaitServiceState( } } - term.Infof("Waiting for services to finish deploying: %q\n", pendingServices) // TODO: don't print in Go-routine + slog.Info(fmt.Sprintf("Waiting for services to finish deploying: %q\n", pendingServices)) // TODO: don't print in Go-routine if msg == nil { continue } - term.Debugf("Service update: %s: state=%s and status=%s\n", msg.Name, msg.State, msg.Status) // TODO: don't print in Go-routine + slog.Debug(fmt.Sprintf("Service update: %s: state=%s and status=%s\n", msg.Name, msg.State, msg.Status)) // TODO: don't print in Go-routine if _, ok := serviceStates[msg.Name]; !ok { - term.Debugf("unexpected service %s update", msg.Name) // TODO: don't print in Go-routine + slog.Debug(fmt.Sprintf("unexpected service %s update", msg.Name)) // TODO: don't print in Go-routine continue } if msg.State == defangv1.ServiceState_NOT_SPECIFIED { diff --git a/src/pkg/cli/tail.go b/src/pkg/cli/tail.go index 62f6afa48..06d741ce7 100644 --- a/src/pkg/cli/tail.go +++ b/src/pkg/cli/tail.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "iter" + "log/slog" "net" "os" "regexp" @@ -146,7 +147,7 @@ func Tail(ctx context.Context, provider client.Provider, projectName string, opt options.LogType = logs.LogTypeAll } - term.Debugf("Tailing %s logs in project %q", options.LogType, projectName) + slog.Debug(fmt.Sprintf("Tailing %s logs in project %q", options.LogType, projectName)) if options.Deployment != "" { _, err := 
types.ParseEtag(options.Deployment) @@ -161,11 +162,11 @@ func Tail(ctx context.Context, provider client.Provider, projectName string, opt if _, err := provider.GetService(ctx, &defangv1.GetRequest{Project: projectName, Name: service}); err != nil { switch connect.CodeOf(err) { case connect.CodeNotFound: - term.Warnf("Service does not exist (yet): %q", service) + slog.Warn(fmt.Sprintf("Service does not exist (yet): %q", service)) case connect.CodeUnknown: // Ignore unknown (nil) errors default: - term.Warn(err) // TODO: use client.PrettyError(…) + slog.Warn(fmt.Sprintf("%v", err)) // TODO: use client.PrettyError(…) } } } @@ -241,7 +242,7 @@ func streamLogs(ctx context.Context, provider client.Provider, projectName strin Limit: options.Limit, } - term.Debug("Tail request:", tailRequest) + slog.Debug(fmt.Sprintln("Tail request:", tailRequest)) logSeq, err := provider.QueryLogs(ctx, tailRequest) if err != nil { @@ -268,7 +269,7 @@ func streamLogs(ctx context.Context, provider client.Provider, projectName strin if oldState, err := term.MakeUnbuf(int(os.Stdin.Fd())); err == nil { defer term.Restore(int(os.Stdin.Fd()), oldState) - term.Info("Showing only build logs and runtime errors. Press V to toggle verbose mode.") + slog.Info("Showing only build logs and runtime errors. Press V to toggle verbose mode.") input := term.NewNonBlockingStdin() defer input.Close() // abort the read loop go func() { @@ -290,7 +291,7 @@ func streamLogs(ctx context.Context, provider client.Provider, projectName strin if debug { debugStr = "ON" } - term.Info("Debug mode", debugStr) + slog.Info(fmt.Sprintln("Debug mode", debugStr)) track.Evt("Debug Toggled", P("debug", debug)) case 'v', 'V': verbose := !options.Verbose @@ -302,7 +303,7 @@ func streamLogs(ctx context.Context, provider client.Provider, projectName strin if toggleCount++; toggleCount == 4 && !verbose { modeStr += ". I like the way you work it, no verbosity." 
} - term.Info("Verbose mode", modeStr) + slog.Info(fmt.Sprintln("Verbose mode", modeStr)) track.Evt("Verbose Toggled", P("verbose", verbose), P("toggleCount", toggleCount)) } } @@ -328,7 +329,7 @@ func makeHeadBookendOptions(options *TailOptions, firstLogTime time.Time) *TailO func printHeadBookend(options *TailOptions, firstLogTime time.Time) { newOptions := makeHeadBookendOptions(options, firstLogTime) if !newOptions.Until.IsZero() { - term.Info("To view older logs, run: `defang logs" + newOptions.String() + "`") + slog.Info("To view older logs, run: `defang logs" + newOptions.String() + "`") } } @@ -346,7 +347,7 @@ func makeTailBookendOptions(options *TailOptions, lastLogTime time.Time) *TailOp func printTailBookend(options *TailOptions, lastLogTime time.Time) { newOptions := makeTailBookendOptions(options, lastLogTime) if !newOptions.Since.IsZero() { - term.Info("To view more recent logs, run: `defang logs" + newOptions.String() + "`") + slog.Info("To view more recent logs, run: `defang logs" + newOptions.String() + "`") } } @@ -376,10 +377,11 @@ func receiveLogs(ctx context.Context, provider client.Provider, projectName stri // Reconnect on transient errors if isTransientError(err) { - term.Debug("Disconnected:", err) + slog.Debug(fmt.Sprintln("Disconnected:", err)) var spaces int if !options.Raw { - spaces, _ = term.Warnf("Reconnecting...\r") // overwritten below + slog.Warn("Reconnecting...\r") + spaces = len(" ! 
Reconnecting...\r") // warnPrefix + message, used to clear the line } if err := provider.DelayBeforeRetry(ctx); err != nil { return err @@ -388,7 +390,7 @@ func receiveLogs(ctx context.Context, provider client.Provider, projectName stri stop() // stop the old iterator newLogSeq, err := provider.QueryLogs(ctx, tailRequest) if err != nil { - term.Debug("Reconnect failed:", err) + slog.Debug(fmt.Sprintln("Reconnect failed:", err)) return err } next, stop = iter.Pull2(newLogSeq) @@ -443,7 +445,7 @@ func handleLogEntryMsgs(msg *defangv1.TailResponse, doSpinner bool, skipDuplicat err := handler(e, options, term.DefaultTerm) if err != nil { - term.Debug("Ending tail loop", err) + slog.Debug(fmt.Sprintln("Ending tail loop", err)) return err } @@ -469,7 +471,7 @@ func logEntryPrintHandler(e *defangv1.LogEntry, options *TailOptions, t *term.Te if options.Raw { if e.Stderr { - term.Error(e.Message) + slog.Error(e.Message) } else { term.Println(e.Message) } diff --git a/src/pkg/cli/tailAndMonitor.go b/src/pkg/cli/tailAndMonitor.go index 183136d8f..038cb0add 100644 --- a/src/pkg/cli/tailAndMonitor.go +++ b/src/pkg/cli/tailAndMonitor.go @@ -3,7 +3,9 @@ package cli import ( "context" "errors" + "fmt" "io" + "log/slog" "sync" "time" @@ -11,7 +13,6 @@ import ( "github.com/DefangLabs/defang/src/pkg" "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/cli/compose" - "github.com/DefangLabs/defang/src/pkg/term" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" ) @@ -56,7 +57,7 @@ func TailAndMonitor(ctx context.Context, project *compose.Project, provider clie // When CD fails, stop WaitServiceState cancelSvcStatus(cdErr) } else { - term.Info("Deployment complete. Waiting for services to be healthy...") + slog.Info("Deployment complete. 
Waiting for services to be healthy...") } }() @@ -72,10 +73,10 @@ func TailAndMonitor(ctx context.Context, project *compose.Project, provider clie // blocking call to tail var tailErr error if err := Tail(tailCtx, provider, project.Name, tailOptions); err != nil { - term.Debug("Tail while monitoring stopped with", err, errors.Unwrap(err)) + slog.Debug(fmt.Sprintln("Tail while monitoring stopped with", err, errors.Unwrap(err))) if connect.CodeOf(err) == connect.CodePermissionDenied { - term.Warn("Unable to tail logs. Waiting for the deployment to finish.") + slog.Warn("Unable to tail logs. Waiting for the deployment to finish.") // If tail fails because of missing permission, we wait for the deployment to finish <-tailCtx.Done() // Get the actual error from the context so we won't print "Error: missing tail permission" @@ -87,14 +88,14 @@ func TailAndMonitor(ctx context.Context, project *compose.Project, provider clie break // an end condition was detected; cdErr and/or svcErr might be nil case errors.Is(context.Cause(ctx), context.Canceled): - term.Warn("Deployment is not finished. Service(s) might not be running.") + slog.Warn("Deployment is not finished. Service(s) might not be running.") case errors.Is(context.Cause(tailCtx), errMonitoringDone): break // the monitoring stopped the tail; cdErr and/or svcErr will have been set case errors.Is(context.Cause(ctx), context.DeadlineExceeded): // Tail was canceled when wait-timeout is reached; show a warning and exit with an error - term.Warn("Wait-timeout exceeded, detaching from logs. Deployment still in progress.") + slog.Warn("Wait-timeout exceeded, detaching from logs. 
Deployment still in progress.") fallthrough default: diff --git a/src/pkg/cli/tail_test.go b/src/pkg/cli/tail_test.go index f97c3c54f..7d4bbfcd4 100644 --- a/src/pkg/cli/tail_test.go +++ b/src/pkg/cli/tail_test.go @@ -7,6 +7,7 @@ import ( "encoding/json" "errors" "io" + "log/slog" "os" "strings" "testing" @@ -122,6 +123,7 @@ func TestTail(t *testing.T) { testTerm.ForceColor(true) defaultTerm := term.DefaultTerm term.DefaultTerm = testTerm + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) t.Cleanup(func() { term.DefaultTerm = defaultTerm }) @@ -185,10 +187,19 @@ func TestTail(t *testing.T) { } for i, g := range got { - e := expectedLogs[i] g = term.StripAnsi(g) - if got := strings.SplitN(g, " ", 2)[1]; got != e { // Remove the date from the log entry - t.Errorf("Tail() = %q, want %q", got, e) + if i == len(got)-1 { + g = strings.TrimSpace(g) + if !strings.HasPrefix(g, "! Reconnecting") { + t.Errorf("Tail() = %q, want something starting with %q", g, "! Reconnecting") + } + } else { + e := expectedLogs[i] + g = strings.TrimRight(g, " ") + e = strings.TrimRight(e, " ") + if got := strings.SplitN(g, " ", 2)[1]; got != e { + t.Errorf("Tail() = %q, want %q", got, e) + } } } @@ -228,8 +239,8 @@ func setupTestTerminal() (*bytes.Buffer, *bytes.Buffer, func()) { testTerm.ForceColor(true) defaultTerm := term.DefaultTerm term.DefaultTerm = testTerm + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) - // Cleanup function to reset the terminal cleanup := func() { term.DefaultTerm = defaultTerm } diff --git a/src/pkg/cli/teardown_cd.go b/src/pkg/cli/teardown_cd.go index 584da8567..8bdee941d 100644 --- a/src/pkg/cli/teardown_cd.go +++ b/src/pkg/cli/teardown_cd.go @@ -5,12 +5,12 @@ import ( "context" "errors" "fmt" + "log/slog" "slices" "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/cli/client/byoc/state" "github.com/DefangLabs/defang/src/pkg/dryrun" - "github.com/DefangLabs/defang/src/pkg/term" ) var ErrExistingStacks = 
errors.New("there are still deployed stacks") @@ -34,9 +34,9 @@ func TearDownCD(ctx context.Context, provider client.Provider, force bool) error }) if len(stacks) > 0 { - term.Info("Some stacks are currently deployed. Run the following commands to tear them down:") + slog.Info("Some stacks are currently deployed. Run the following commands to tear them down:") for _, stack := range stacks { - term.Infof(" `defang down --workspace %s --project-name %s --stack %s`\n", stack.Workspace, stack.Project, stack.Stack) + slog.Info(fmt.Sprintf(" `defang down --workspace %s --project-name %s --stack %s`\n", stack.Workspace, stack.Project, stack.Stack)) } if !force { return ErrExistingStacks diff --git a/src/pkg/cli/token.go b/src/pkg/cli/token.go index 632a26ec2..db30413b4 100644 --- a/src/pkg/cli/token.go +++ b/src/pkg/cli/token.go @@ -2,6 +2,8 @@ package cli import ( "context" + "fmt" + "log/slog" "time" "github.com/DefangLabs/defang/src/pkg/auth" @@ -19,7 +21,7 @@ func Token(ctx context.Context, client client.FabricClient, tenant types.TenantN } code, err := auth.StartAuthCodeFlow(ctx, false, func(token string) { - term.Debug("Getting access token for scope:", s) + slog.Debug(fmt.Sprintln("Getting access token for scope:", s)) }, "token-cli") if err != nil { return err @@ -36,7 +38,7 @@ func Token(ctx context.Context, client client.FabricClient, tenant types.TenantN scopes = []string{string(s)} } - term.Debugf("Generating token for tenant %q with scopes %v", tenant, scopes) + slog.Debug(fmt.Sprintf("Generating token for tenant %q with scopes %v", tenant, scopes)) resp, err := client.Token(ctx, &defangv1.TokenRequest{ Assertion: at, diff --git a/src/pkg/cli/upgrade.go b/src/pkg/cli/upgrade.go index b84f58654..70cdd9aec 100644 --- a/src/pkg/cli/upgrade.go +++ b/src/pkg/cli/upgrade.go @@ -3,6 +3,8 @@ package cli import ( "context" "errors" + "fmt" + "log/slog" "os" "os/exec" "path/filepath" @@ -18,13 +20,13 @@ func Upgrade(ctx context.Context) error { if err != nil { return 
err } - term.Debugf(" - Executable: %s\n", ex) + slog.Debug(fmt.Sprintf(" - Executable: %s\n", ex)) ex, err = filepath.EvalSymlinks(ex) if err != nil { return err } - term.Debugf(" - Evaluated: %s\n", ex) + slog.Debug(fmt.Sprintf(" - Evaluated: %s\n", ex)) if strings.HasPrefix(ex, "/nix/store/") { // Detect whether the user has used Flakes or nix-env @@ -93,6 +95,6 @@ func homebrewPrefix(ctx context.Context) (string, error) { } func printInstructions(cmd string) { - term.Info("To upgrade defang, run the following command:") + slog.Info("To upgrade defang, run the following command:") term.Print("\n ", cmd, "\n\n") } diff --git a/src/pkg/cli/waitForCdTaskExit.go b/src/pkg/cli/waitForCdTaskExit.go index 18fc558c9..a7970c2af 100644 --- a/src/pkg/cli/waitForCdTaskExit.go +++ b/src/pkg/cli/waitForCdTaskExit.go @@ -19,7 +19,7 @@ func WaitForCdTaskExit(ctx context.Context, provider client.Provider) error { select { case <-ticker.C: done, err := provider.GetDeploymentStatus(ctx) - // term.Debugf("Polled CD task status: done=%v, err=%v", done, err) + // slog.Debug(fmt.Sprintf("Polled CD task status: done=%v, err=%v", done, err)) if err != nil { // End condition: EOF indicates that the task has completed successfully if errors.Is(err, io.EOF) { diff --git a/src/pkg/cli/whoami.go b/src/pkg/cli/whoami.go index 6e17e0b1c..3200212f9 100644 --- a/src/pkg/cli/whoami.go +++ b/src/pkg/cli/whoami.go @@ -2,10 +2,11 @@ package cli import ( "context" + "fmt" + "log/slog" "github.com/DefangLabs/defang/src/pkg/auth" "github.com/DefangLabs/defang/src/pkg/cli/client" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/types" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" @@ -33,7 +34,7 @@ func Whoami(ctx context.Context, fabric client.FabricClient, maybeProvider clien tenantSelection = types.TenantNameOrID(resp.TenantId) } - term.Debug("User ID: " + resp.UserId) + slog.Debug("User ID: " + resp.UserId) showData := ShowAccountData{ Region: 
resp.Region, SubscriberTier: resp.Tier, @@ -45,7 +46,7 @@ func Whoami(ctx context.Context, fabric client.FabricClient, maybeProvider clien if maybeProvider != nil { // Add provider account information if err := maybeProvider.Authenticate(ctx, false); err != nil { // Do not interactively login for whoami - term.Debug("Unable to authenticate provider:", err) + slog.Debug(fmt.Sprintln("Unable to authenticate provider:", err)) } account, err := maybeProvider.AccountInfo(ctx) if err == nil { diff --git a/src/pkg/clouds/aws/codebuild/cfn/setup.go b/src/pkg/clouds/aws/codebuild/cfn/setup.go index 6fce2b414..eeb31d4f7 100644 --- a/src/pkg/clouds/aws/codebuild/cfn/setup.go +++ b/src/pkg/clouds/aws/codebuild/cfn/setup.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "slices" "strconv" "strings" @@ -12,7 +13,6 @@ import ( "github.com/DefangLabs/defang/src/pkg/clouds/aws" awscodebuild "github.com/DefangLabs/defang/src/pkg/clouds/aws/codebuild" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/aws/aws-sdk-go-v2/service/cloudformation" cfnTypes "github.com/aws/aws-sdk-go-v2/service/cloudformation/types" "github.com/aws/smithy-go" @@ -94,7 +94,7 @@ func (a *AwsCfn) updateStackAndWait(ctx context.Context, templateBody string, fo return err // might call createStackAndWait depending on the error } - term.Info("Waiting for CloudFormation stack", a.stackName, "to be updated...") // TODO: verbose only + slog.Info(fmt.Sprintln("Waiting for CloudFormation stack", a.stackName, "to be updated...")) // TODO: verbose only dso, err := cloudformation.NewStackUpdateCompleteWaiter(cfn, update1s).WaitForOutput(ctx, &cloudformation.DescribeStacksInput{ StackName: uso.StackId, }, stackTimeout) @@ -131,7 +131,7 @@ func (a *AwsCfn) createStackAndWait(ctx context.Context, templateBody string, pa } } - term.Info("Waiting for CloudFormation stack", a.stackName, "to be created...") // TODO: verbose only + slog.Info(fmt.Sprintln("Waiting for CloudFormation stack", a.stackName, 
"to be created...")) // TODO: verbose only dso, err := cloudformation.NewStackCreateCompleteWaiter(cfn, create1s).WaitForOutput(ctx, &cloudformation.DescribeStacksInput{ StackName: ptr.String(a.stackName), }, stackTimeout) @@ -252,7 +252,7 @@ func (a *AwsCfn) TearDown(ctx context.Context) error { StackName: ptr.String(a.stackName), EnableTerminationProtection: ptr.Bool(false), }); err != nil { - term.Warnf("Failed to disable termination protection for CloudFormation stack %s: %v\n", a.stackName, err) + slog.Warn(fmt.Sprintf("Failed to disable termination protection for CloudFormation stack %s: %v\n", a.stackName, err)) } _, err = cfn.DeleteStack(ctx, &cloudformation.DeleteStackInput{ StackName: ptr.String(a.stackName), @@ -262,7 +262,7 @@ func (a *AwsCfn) TearDown(ctx context.Context) error { return err } - term.Info("Waiting for CloudFormation stack", a.stackName, "to be deleted...") // TODO: verbose only + slog.Info(fmt.Sprintln("Waiting for CloudFormation stack", a.stackName, "to be deleted...")) // TODO: verbose only return cloudformation.NewStackDeleteCompleteWaiter(cfn, delete1s).Wait(ctx, &cloudformation.DescribeStacksInput{ StackName: ptr.String(a.stackName), }, stackTimeout) diff --git a/src/pkg/clouds/aws/login.go b/src/pkg/clouds/aws/login.go index a607fe9a0..cfc573719 100644 --- a/src/pkg/clouds/aws/login.go +++ b/src/pkg/clouds/aws/login.go @@ -13,6 +13,7 @@ import ( "errors" "fmt" "io" + "log/slog" "net/http" "net/url" "os" @@ -71,10 +72,10 @@ func (p *awsOAuthCredentialsProvider) Retrieve(ctx context.Context) (awssdk.Cred } // Access token is expired — use the refresh token to get new credentials. 
- term.Debug("AWS OAuth access token expired, refreshing...") + slog.Debug("AWS OAuth access token expired, refreshing...") refreshed, err := refreshToken(ctx, p.cached) if err != nil { - term.Debugf("failed to refresh AWS OAuth token: %v", err) + slog.Debug(fmt.Sprintf("failed to refresh AWS OAuth token: %v", err)) return awssdk.Credentials{}, fmt.Errorf("refreshing AWS OAuth token: %w", err) } @@ -85,9 +86,9 @@ func (p *awsOAuthCredentialsProvider) Retrieve(ctx context.Context) (awssdk.Cred return awssdk.Credentials{}, fmt.Errorf("marshaling refreshed token: %w", err) } if err := p.tokenStore.Save(p.storeKey, string(tokenBytes)); err != nil { - term.Warnf("failed to persist refreshed AWS OAuth token: %v", err) + slog.Warn(fmt.Sprintf("failed to persist refreshed AWS OAuth token: %v", err)) } else { - term.Debugf("persisted refreshed AWS OAuth token for %q", p.storeKey) + slog.Debug(fmt.Sprintf("persisted refreshed AWS OAuth token for %q", p.storeKey)) } } @@ -126,14 +127,14 @@ func (a *Aws) Authenticate(ctx context.Context, interactive bool) error { } // 1. 
Try default AWS credentials - term.Debugf("checking default AWS credentials for region %s...", a.Region) + slog.Debug(fmt.Sprintf("checking default AWS credentials for region %s...", a.Region)) if _, err := a.testCredentials(ctx, nil); err != nil { if ctx.Err() != nil { // Fast fail if context is done, no need to try other credential sources return ctx.Err() } - term.Debugf("default AWS credentials invalid: %v", err) + slog.Debug(fmt.Sprintf("default AWS credentials invalid: %v", err)) } else { - term.Debug("found valid default AWS credentials") + slog.Debug("found valid default AWS credentials") return nil } @@ -153,7 +154,7 @@ func (a *Aws) Authenticate(ctx context.Context, interactive bool) error { if !interactive { return errors.New("no valid AWS credentials found") // TODO: Better error message with possible doc link } - term.Info("no valid credentials found, starting interactive login...") + slog.Info("no valid credentials found, starting interactive login...") creds, err := a.tryInteractiveLogin(ctx, 3) if err != nil { return err @@ -179,7 +180,7 @@ func (a *Aws) tryInteractiveLogin(ctx context.Context, n int) (awssdk.Credential sum := sha256.Sum256([]byte(cached.LoginSession)) storeKey = fmt.Sprintf("%s%x", tokenStoreKeyPrefix, sum) if err := a.TokenStore.Save(storeKey, string(tokenBytes)); err != nil { - term.Warnf("failed to save AWS OAuth token: %v", err) + slog.Warn(fmt.Sprintf("failed to save AWS OAuth token: %v", err)) } } @@ -187,7 +188,7 @@ func (a *Aws) tryInteractiveLogin(ctx context.Context, n int) (awssdk.Credential creds, err := a.testCredentialsWithProfile(ctx, storeKey, provider) if err != nil { - term.Warnf("Cannot use login credentials: %v, please try again.", err) + slog.Warn(fmt.Sprintf("Cannot use login credentials: %v, please try again.", err)) continue } return creds, nil @@ -196,7 +197,7 @@ func (a *Aws) tryInteractiveLogin(ctx context.Context, n int) (awssdk.Credential } func (a *Aws) findStoredCredentials(ctx context.Context) 
(awssdk.CredentialsProvider, error) { - term.Debug("checking stored AWS OAuth tokens...") + slog.Debug("checking stored AWS OAuth tokens...") tokenNames, err := a.TokenStore.List(tokenStoreKeyPrefix) if err != nil { return nil, fmt.Errorf("failed to list tokens: %w", err) @@ -205,13 +206,13 @@ func (a *Aws) findStoredCredentials(ctx context.Context) (awssdk.CredentialsProv for _, name := range tokenNames { tokenJSON, err := a.TokenStore.Load(name) if err != nil { - term.Debugf("failed to load token %q: %v", name, err) + slog.Debug(fmt.Sprintf("failed to load token %q: %v", name, err)) continue } var cached awsTokenCache if err := json.Unmarshal([]byte(tokenJSON), &cached); err != nil { - term.Debugf("failed to unmarshal token %q: %v", name, err) + slog.Debug(fmt.Sprintf("failed to unmarshal token %q: %v", name, err)) continue } @@ -221,11 +222,11 @@ func (a *Aws) findStoredCredentials(ctx context.Context) (awssdk.CredentialsProv } if cached.RefreshToken == "" && time.Now().After(cached.AccessToken.ExpiresAt) { - term.Debugf("token %q is expired and has no refresh token, skipping", name) + slog.Debug(fmt.Sprintf("token %q is expired and has no refresh token, skipping", name)) continue } - term.Debugf("testing token %q (expires %s)...", name, cached.AccessToken.ExpiresAt.Format(time.RFC3339)) + slog.Debug(fmt.Sprintf("testing token %q (expires %s)...", name, cached.AccessToken.ExpiresAt.Format(time.RFC3339))) provider := &awsOAuthCredentialsProvider{cached: &cached, tokenStore: a.TokenStore, storeKey: name} // Calling testCredentialsWithProfile triggers Retrieve(), which auto-refreshes @@ -236,7 +237,7 @@ func (a *Aws) findStoredCredentials(ctx context.Context) (awssdk.CredentialsProv if ctx.Err() != nil { return nil, ctx.Err() } - term.Debugf("token %q failed AWS_PROFILE role validation: %v, skipping...", name, err) + slog.Debug(fmt.Sprintf("token %q failed AWS_PROFILE role validation: %v, skipping...", name, err)) continue } return creds, nil @@ -256,19 +257,19 @@ 
func (a *Aws) testCredentialsWithProfile(ctx context.Context, name string, creds // If the stack/env specifies an AWS_PROFILE with role, try assume the role roleArn, profile, err := a.GetStackAwsProfileRoleArn(ctx) if err != nil { - term.Warnf("failed to get AWS_PROFILE role ARN: %v", err) + slog.Warn(fmt.Sprintf("failed to get AWS_PROFILE role ARN: %v", err)) } else if profile == "" { - term.Warn("AWS_PROFILE environment variable is not set, skipping AWS_PROFILE role validation") + slog.Warn("AWS_PROFILE environment variable is not set, skipping AWS_PROFILE role validation") } else if roleArn != "" { same, err := sameRole(*identity.Arn, roleArn) if err != nil { - term.Warnf("failed to compare token identity with AWS_PROFILE role: %v", err) + slog.Warn(fmt.Sprintf("failed to compare token identity with AWS_PROFILE role: %v", err)) } else if same { - term.Debugf("token %q identity %q matches AWS_PROFILE role %q", name, *identity.Arn, roleArn) + slog.Debug(fmt.Sprintf("token %q identity %q matches AWS_PROFILE role %q", name, *identity.Arn, roleArn)) return creds, nil } - term.Debugf("checking if token %q identity %q can assume AWS_PROFILE role %q", name, *identity.Arn, roleArn) + slog.Debug(fmt.Sprintf("checking if token %q identity %q can assume AWS_PROFILE role %q", name, *identity.Arn, roleArn)) credCfg, err := LoadDefaultConfig(ctx, config.WithRegion(string(a.Region)), config.WithCredentialsProvider(creds)) if err != nil { return nil, err @@ -288,11 +289,11 @@ func (a *Aws) testCredentialsWithProfile(ctx context.Context, name string, creds return nil, fmt.Errorf("login successful, but does not have access to role %q in used by stack aws profile %q; token account %v does not match stack aws profile account %v", roleArn, profile, *identity.Account, parsedArn.AccountID) } // If cannot assume but it's the same account, we assume its a valid token - term.Warnf("login successful for AWS account %v which is same as the account specified by stack aws profile %q, assume 
its valid", *identity.Account, profile) + slog.Warn(fmt.Sprintf("login successful for AWS account %v which is same as the account specified by stack aws profile %q, assume its valid", *identity.Account, profile)) return creds, nil } // If able to assume the profile role, use the assumed role credentials - term.Debugf("token %q is valid and can assume AWS_PROFILE role %q\n", name, roleArn) + slog.Debug(fmt.Sprintf("token %q is valid and can assume AWS_PROFILE role %q\n", name, roleArn)) return assumeRoleProvider, nil } // If no AWS_PROFILE with role specified, any valid token is considered acceptable @@ -338,7 +339,7 @@ func (a *Aws) InteractiveLogin(ctx context.Context) (*awsTokenCache, error) { port := "8080" // default port if parsing fails parsed, err := url.Parse(redirectURL) if err != nil { - term.Warnf("failed to parse redirect URL %q, assume port 8080: %v", redirectURL, err) + slog.Warn(fmt.Sprintf("failed to parse redirect URL %q, assume port 8080: %v", redirectURL, err)) } else { port = parsed.Port() } diff --git a/src/pkg/clouds/do/appPlatform/setup.go b/src/pkg/clouds/do/appPlatform/setup.go index 632f18147..661e14bd7 100644 --- a/src/pkg/clouds/do/appPlatform/setup.go +++ b/src/pkg/clouds/do/appPlatform/setup.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "os" "path" "regexp" @@ -13,7 +14,6 @@ import ( "github.com/DefangLabs/defang/src/pkg" "github.com/DefangLabs/defang/src/pkg/clouds/do" "github.com/DefangLabs/defang/src/pkg/dockerhub" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/credentials" "github.com/aws/aws-sdk-go-v2/service/s3" @@ -90,7 +90,7 @@ func (d *DoApp) SetUpBucket(ctx context.Context) error { } func getImageSourceSpec(cdImagePath string) (*godo.ImageSourceSpec, error) { - term.Debugf("Using CD image: %q", cdImagePath) + slog.Debug(fmt.Sprintf("Using CD image: %q", cdImagePath)) image, err := dockerhub.ParseImage(cdImagePath) if err != nil { return nil, 
err @@ -146,7 +146,7 @@ func (d DoApp) Run(ctx context.Context, env []*godo.AppVariableDefinition, cdIma appList, _, err := client.Apps.List(ctx, &godo.ListOptions{}) if err != nil { - term.Debugf("Error listing apps: %s", err) + slog.Debug(fmt.Sprintf("Error listing apps: %s", err)) } for _, app := range appList { @@ -157,7 +157,7 @@ func (d DoApp) Run(ctx context.Context, env []*godo.AppVariableDefinition, cdIma //Update current CD app if it exists if currentCd.Spec != nil && currentCd.Spec.Name != "" { - term.Debugf("Updating existing CD app") + slog.Debug("Updating existing CD app") currentCd, _, err = client.Apps.Update(ctx, currentCd.ID, &godo.AppUpdateRequest{ Spec: appJobSpec, UpdateAllSourceVersions: true, // force update of the CD image @@ -167,7 +167,7 @@ func (d DoApp) Run(ctx context.Context, env []*godo.AppVariableDefinition, cdIma return nil, err } } else { - term.Debugf("Creating new CD app") + slog.Debug("Creating new CD app") project, _, err := client.Projects.Create(ctx, &godo.CreateProjectRequest{ Name: CdName, Purpose: "Infrastructure for running Defang commands", diff --git a/src/pkg/clouds/gcp/api.go b/src/pkg/clouds/gcp/api.go index 5d6cdc35d..623d7155e 100644 --- a/src/pkg/clouds/gcp/api.go +++ b/src/pkg/clouds/gcp/api.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "time" "github.com/DefangLabs/defang/src/pkg" @@ -27,7 +28,7 @@ func (gcp Gcp) EnsureAPIsEnabled(ctx context.Context, apis ...string) error { projectName := "projects/" + gcp.ProjectId for i := range maxAttempts { - term.Debugf("Enabling services: %v\n", apis) + slog.Debug(fmt.Sprintf("Enabling services: %v\n", apis)) req := &serviceusage.BatchEnableServicesRequest{ ServiceIds: apis, } @@ -41,7 +42,7 @@ func (gcp Gcp) EnsureAPIsEnabled(ctx context.Context, apis ...string) error { } term.Printf("Error: %+v (%T)\n", err, err) if i < maxAttempts-1 { - term.Debugf("Failed to enable services, will retry in %v: %v\n", retryInterval, err) + 
slog.Debug(fmt.Sprintf("Failed to enable services, will retry in %v: %v\n", retryInterval, err)) if err := pkg.SleepWithContext(ctx, retryInterval); err != nil { return err } @@ -54,11 +55,11 @@ func (gcp Gcp) EnsureAPIsEnabled(ctx context.Context, apis ...string) error { for { op, err := opService.Get(operation.Name).Context(ctx).Do() if err != nil { - term.Warnf("Failed to get operation status: %v\n", err) + slog.Warn(fmt.Sprintf("Failed to get operation status: %v\n", err)) } else if op.Done { // Check if the operation is done if op.Error != nil { if i < maxAttempts-1 { - term.Debugf("Failed to enable services operation, will retry in %v: %v\n", retryInterval, op.Error) + slog.Debug(fmt.Sprintf("Failed to enable services operation, will retry in %v: %v\n", retryInterval, op.Error)) if err := pkg.SleepWithContext(ctx, retryInterval); err != nil { return err } diff --git a/src/pkg/clouds/gcp/iam.go b/src/pkg/clouds/gcp/iam.go index c8adc4582..d14a12811 100644 --- a/src/pkg/clouds/gcp/iam.go +++ b/src/pkg/clouds/gcp/iam.go @@ -3,6 +3,7 @@ package gcp import ( "context" "fmt" + "log/slog" "slices" "time" @@ -14,7 +15,6 @@ import ( resourcemanager "cloud.google.com/go/resourcemanager/apiv3" "cloud.google.com/go/storage" "github.com/DefangLabs/defang/src/pkg" - "github.com/DefangLabs/defang/src/pkg/term" gax "github.com/googleapis/gax-go/v2" ) @@ -36,7 +36,7 @@ func (gcp Gcp) EnsureRoleExists(ctx context.Context, roleId, title, description role.GetTitle() == title && role.GetDescription() == description && role.Stage == iamadmpb.Role_GA { - term.Debugf("Role %s already exists", roleId) + slog.Debug(fmt.Sprintf("Role %s already exists", roleId)) return role.Name, nil } @@ -45,7 +45,7 @@ func (gcp Gcp) EnsureRoleExists(ctx context.Context, roleId, title, description role.Title = title role.Description = description role.Stage = iamadmpb.Role_GA - term.Infof("Updating role %s", roleId) + slog.Info("Updating role " + roleId) if _, err := client.UpdateRole(ctx, 
&iamadmpb.UpdateRoleRequest{Name: roleName, Role: role}); err != nil { return "", fmt.Errorf("failed to update role: %w", err) } @@ -63,12 +63,12 @@ func (gcp Gcp) EnsureRoleExists(ctx context.Context, roleId, title, description Stage: iamadmpb.Role_GA, // TODO: investigate stage }, } - term.Infof("Creating role %s", roleId) + slog.Info("Creating role " + roleId) role, err = client.CreateRole(ctx, req) if err != nil { return "", fmt.Errorf("failed to create role: %w", err) } - term.Debugf("Role %s created successfully.", roleId) + slog.Debug(fmt.Sprintf("Role %s created successfully.", roleId)) } // Wait for the role to be created or updated @@ -102,13 +102,13 @@ func (gcp Gcp) EnsureServiceAccountExists(ctx context.Context, serviceAccountId, if err == nil { if account.GetDisplayName() == displayName && account.GetDescription() == description { - term.Debugf("Service account %s already exists", serviceAccountId) + slog.Debug(fmt.Sprintf("Service account %s already exists", serviceAccountId)) return account.Name, nil } account.DisplayName = displayName account.Description = description - term.Infof("Updating service account %s", serviceAccountId) + slog.Info("Updating service account " + serviceAccountId) if _, err := client.UpdateServiceAccount(ctx, &iamadmpb.ServiceAccount{Name: account.Name, DisplayName: displayName, Description: description}); err != nil { return "", fmt.Errorf("failed to update service account: %w", err) } @@ -124,13 +124,13 @@ func (gcp Gcp) EnsureServiceAccountExists(ctx context.Context, serviceAccountId, }, Name: "projects/" + gcp.ProjectId, } - term.Infof("Creating service account %s", serviceAccountId) + slog.Info("Creating service account " + serviceAccountId) account, err := client.CreateServiceAccount(ctx, req) if err != nil { return "", fmt.Errorf("failed to create service account: %w", err) } - term.Debugf("Service account %s created successfully.", serviceAccountId) + slog.Debug(fmt.Sprintf("Service account %s created successfully.", 
serviceAccountId)) accountName := account.Name for start := time.Now(); time.Since(start) < 5*time.Minute; { account, err = client.GetServiceAccount(ctx, &iamadmpb.GetServiceAccountRequest{Name: accountName}) @@ -188,15 +188,15 @@ func (gcp Gcp) EnsurePrincipalHasBucketRoles(ctx context.Context, bucketName, pr } if !needUpdate { - term.Debugf("Principal %s already has roles %v on bucket %s", principal, roles, bucketName) + slog.Debug(fmt.Sprintf("Principal %s already has roles %v on bucket %s", principal, roles, bucketName)) return nil } - term.Infof("Updating IAM policy for principal %s on bucket %s", principal, bucketName) + slog.Info(fmt.Sprintf("Updating IAM policy for principal %s on bucket %s", principal, bucketName)) for i := range maxAttempts { // Service account might not be visible for a few seconds after creation for policy attachment if err := bucket.IAM().SetPolicy(ctx, policy); err != nil { if i < maxAttempts-1 { - term.Infof("Failed to set IAM policy, will retry in %v: %v\n", retryInterval, err) + slog.Info(fmt.Sprintf("Failed to set IAM policy, will retry in %v: %v\n", retryInterval, err)) if err := pkg.SleepWithContext(ctx, retryInterval); err != nil { return err } @@ -269,14 +269,14 @@ func (gcp Gcp) EnsurePrincipalHasServiceAccountRoles(ctx context.Context, princi return nil } - term.Infof("Updating IAM policy for %s on service account %s", principal, serviceAccount) + slog.Info(fmt.Sprintf("Updating IAM policy for %s on service account %s", principal, serviceAccount)) for i := range maxAttempts { // Service account might not be visible for a few seconds after creation for policy attachment if _, err := client.SetIamPolicy(ctx, &iamadm.SetIamPolicyRequest{ Resource: resource, Policy: policy, }); err != nil { if i < maxAttempts-1 { - term.Infof("Failed to set IAM policy for service account %s, will retry in %v: %v\n", serviceAccount, retryInterval, err) + slog.Info(fmt.Sprintf("Failed to set IAM policy for service account %s, will retry in %v: 
%v\n", serviceAccount, retryInterval, err)) if err := pkg.SleepWithContext(ctx, retryInterval); err != nil { return err } @@ -345,15 +345,15 @@ func ensurePrincipalHasRolesWithResource(ctx context.Context, client resourceWit } if !bindingNeedsUpdate && len(rolesNotFound) == 0 { - term.Debugf("%s already has roles %v on resource %s", principal, roles, resource) + slog.Debug(fmt.Sprintf("%s already has roles %v on resource %s", principal, roles, resource)) return nil } - term.Infof("Updating IAM policy for resource %s", resource) + slog.Info("Updating IAM policy for resource " + resource) for i := range maxAttempts { // Service account might not be visible for a few seconds after creation for policy attachment if _, err := client.SetIamPolicy(ctx, &iampb.SetIamPolicyRequest{Resource: resource, Policy: policy}); err != nil { if i < maxAttempts-1 { - term.Debugf("Failed to set IAM policy for resource %s, will retry in %v: %v\n", resource, retryInterval, err) + slog.Debug(fmt.Sprintf("Failed to set IAM policy for resource %s, will retry in %v: %v\n", resource, retryInterval, err)) if err := pkg.SleepWithContext(ctx, retryInterval); err != nil { return err } diff --git a/src/pkg/clouds/gcp/logging.go b/src/pkg/clouds/gcp/logging.go index 992611e4f..720bbe068 100644 --- a/src/pkg/clouds/gcp/logging.go +++ b/src/pkg/clouds/gcp/logging.go @@ -5,10 +5,10 @@ import ( "errors" "fmt" "io" + "log/slog" logging "cloud.google.com/go/logging/apiv2" "cloud.google.com/go/logging/apiv2/loggingpb" - "github.com/DefangLabs/defang/src/pkg/term" "google.golang.org/api/iterator" ) @@ -75,9 +75,9 @@ func (t *gcpLoggingTailer) Next(ctx context.Context) (*loggingpb.LogEntry, error func (t *gcpLoggingTailer) Close() error { // TODO: find out how to properly close the client - term.Debugf("Closing log tailer") + slog.Debug("Closing log tailer") e1 := t.tleClient.CloseSend() - term.Debugf("Closing log tailer client") + slog.Debug("Closing log tailer client") e2 := t.client.Close() return 
errors.Join(e1, e2) } @@ -116,7 +116,7 @@ func (gcp Gcp) ListLogEntries(ctx context.Context, query string, order Order) (L func (l *gcpLoggingLister) Next() (*loggingpb.LogEntry, error) { entry, err := l.it.Next() if err == iterator.Done { - term.Debugf("Closing log lister client") + slog.Debug("Closing log lister client") if err := l.client.Close(); err != nil { return nil, err } diff --git a/src/pkg/clouds/gcp/login.go b/src/pkg/clouds/gcp/login.go index 98d4f142e..c8ba00fe0 100644 --- a/src/pkg/clouds/gcp/login.go +++ b/src/pkg/clouds/gcp/login.go @@ -7,6 +7,7 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "os" "path" "slices" @@ -110,14 +111,14 @@ func (gcp *Gcp) Authenticate(ctx context.Context, interactive bool) error { // 1. Try the default application credentials or from the "GOOGLE_APPLICATION_CREDENTIALS" env var if set // - if the user has login with glcoud cli with application default credentials // - if the user has set GOOGLE_APPLICATION_CREDENTIALS to a service account key file with required permissions - term.Debugf("checking if application default credentials are available and has permission, GOOGLE_APPLICATION_CREDENTIALS=%q...", os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")) + slog.Debug(fmt.Sprintf("checking if application default credentials are available and has permission, GOOGLE_APPLICATION_CREDENTIALS=%q...", os.Getenv("GOOGLE_APPLICATION_CREDENTIALS"))) if err := testTokenProjectPermissions(ctx, gcp.ProjectId, requiredPerms, nil); err != nil { if ctx.Err() != nil { // Fast fail if context is done, no need to try other credential sources return ctx.Err() } - term.Debugf("the application default credentials are missing permissions: %v", err) + slog.Debug(fmt.Sprintf("the application default credentials are missing permissions: %v", err)) } else { - term.Debug("found valid application default credentials with required permissions") + slog.Debug("found valid application default credentials with required permissions") // No need to pass 
down ADC token source via options since ADC is automatically used by gcp sdk return nil } @@ -127,13 +128,13 @@ func (gcp *Gcp) Authenticate(ctx context.Context, interactive bool) error { if ctx.Err() != nil { // Fast fail if context is done, no need to try other credential sources return ctx.Err() } - term.Warnf("failed to get GitHub Actions OIDC token source: %v", err) + slog.Warn(fmt.Sprintf("failed to get GitHub Actions OIDC token source: %v", err)) } else if tokenSource != nil { - term.Debug("found GitHub Actions OIDC token source, testing permissions...") + slog.Debug("found GitHub Actions OIDC token source, testing permissions...") if err := testTokenProjectPermissions(ctx, gcp.ProjectId, requiredPerms, tokenSource); err != nil { - term.Warnf("GitHub Actions OIDC token is missing required permissions on project %q: %v\nPlease ensure your workload identity provider and github actions permissions are set up correctly: https://docs.defang.com/defang-byoc/gcp/github-actions\n", gcp.ProjectId, err) + slog.Warn(fmt.Sprintf("GitHub Actions OIDC token is missing required permissions on project %q: %v\nPlease ensure your workload identity provider and github actions permissions are set up correctly: https://docs.defang.com/defang-byoc/gcp/github-actions\n", gcp.ProjectId, err)) } else { - term.Debug("GitHub Actions OIDC token has required permissions") + slog.Debug("GitHub Actions OIDC token has required permissions") gcp.Options = append(gcp.Options, option.WithTokenSource(tokenSource)) gcp.TokenSource = tokenSource gcp.Principal = principal @@ -146,9 +147,9 @@ func (gcp *Gcp) Authenticate(ctx context.Context, interactive bool) error { if ctx.Err() != nil { // Fast fail if context is done, no need to try other credential sources return ctx.Err() } - term.Warnf("failed to load stored credentials: %v", err) + slog.Warn(fmt.Sprintf("failed to load stored credentials: %v", err)) } else if tokenSource != nil { - term.Debug("found valid stored credentials with required 
permissions") + slog.Debug("found valid stored credentials with required permissions") gcp.Options = append(gcp.Options, option.WithTokenSource(tokenSource)) gcp.TokenSource = tokenSource return nil @@ -158,7 +159,7 @@ func (gcp *Gcp) Authenticate(ctx context.Context, interactive bool) error { if !interactive { return errors.New("No valid gcloud credentials found") // TODO: Better error message with possible doc link } - term.Debug("no valid tokens found in token store, starting interactive login flow...") + slog.Debug("no valid tokens found in token store, starting interactive login flow...") return gcp.tryInteractiveLogin(ctx, 3) } @@ -170,11 +171,11 @@ func (gcp *Gcp) tryInteractiveLogin(ctx context.Context, n int) error { } if err := testTokenProjectPermissions(ctx, gcp.ProjectId, requiredPerms, tokenSource); err != nil { if errors.As(err, &ErrorMissingPermissions{}) { - term.Warnf("Token from interactive login is missing required permissions on project %q: %v\nPlease ensure your user has the following permissions: %v\n", gcp.ProjectId, err, requiredPerms) + slog.Warn(fmt.Sprintf("Token from interactive login is missing required permissions on project %q: %v\nPlease ensure your user has the following permissions: %v\n", gcp.ProjectId, err, requiredPerms)) } else { - term.Warnf("Failed to validate token from interactive login on project %q: %v\n", gcp.ProjectId, err) + slog.Warn(fmt.Sprintf("Failed to validate token from interactive login on project %q: %v\n", gcp.ProjectId, err)) } - term.Warn("Please try logging in again with an account that has the required permissions.") + slog.Warn("Please try logging in again with an account that has the required permissions.") continue } gcp.Options = append(gcp.Options, option.WithTokenSource(tokenSource)) @@ -192,7 +193,7 @@ func (gcp *Gcp) tryInteractiveLogin(ctx context.Context, n int) error { return fmt.Errorf("failed to marshal token: %w", err) } if gcp.TokenStore == nil { - term.Warn("No token store configured, 
skipping persisting token") + slog.Warn("No token store configured, skipping persisting token") return nil } if err := gcp.TokenStore.Save(tokenName, string(bytes)); err != nil { @@ -221,24 +222,24 @@ func (gcp *Gcp) findStoredCredentials(ctx context.Context) (oauth2.TokenSource, for _, name := range oauthTokenNames { tokenJson, err := gcp.TokenStore.Load(name) if err != nil { - term.Warnf("failed to load previously saved auth token %q: %v", name, err) + slog.Warn(fmt.Sprintf("failed to load previously saved auth token %q: %v", name, err)) continue } var token oauth2.Token if err = json.Unmarshal([]byte(tokenJson), &token); err != nil { - term.Warnf("failed to parse previously saved auth token %q: %v", name, err) + slog.Warn(fmt.Sprintf("failed to parse previously saved auth token %q: %v", name, err)) continue } - term.Debugf("Testing token %q from store for required permissions...", name) + slog.Debug(fmt.Sprintf("Testing token %q from store for required permissions...", name)) tokenSource := config.TokenSource(ctx, &token) if err := testTokenProjectPermissions(ctx, gcp.ProjectId, requiredPerms, tokenSource); err == nil { - term.Debugf("Token %q is valid and has required permissions\n", name) + slog.Debug(fmt.Sprintf("Token %q is valid and has required permissions\n", name)) currentToken, err := tokenSource.Token() if err != nil { return nil, fmt.Errorf("failed to retrieve current token from token source: %w", err) } if currentToken.AccessToken != token.AccessToken || currentToken.Expiry != token.Expiry || currentToken.RefreshToken != token.RefreshToken { - term.Debugf("Token %q has been updated, persisting updated token...\n", name) + slog.Debug(fmt.Sprintf("Token %q has been updated, persisting updated token...\n", name)) bytes, err := json.Marshal(currentToken) if err != nil { return nil, fmt.Errorf("failed to marshal updated token: %w", err) @@ -254,7 +255,7 @@ func (gcp *Gcp) findStoredCredentials(ctx context.Context) (oauth2.TokenSource, if ctx.Err() != nil 
{ return nil, ctx.Err() } - term.Debugf("Token %q is missing required permissions: %v\n", name, err) + slog.Debug(fmt.Sprintf("Token %q is missing required permissions: %v\n", name, err)) continue } } @@ -265,7 +266,7 @@ func findGithubCredentials(ctx context.Context) (oauth2.TokenSource, string, err // If both ACTIONS_ID_TOKEN_REQUEST_URL and GOOGLE_WORKLOAD_IDENTITY_PROVIDER are set, we're doing "Workload Identity Federation" with GCP using github id token githubTokenReqUrl := os.Getenv("ACTIONS_ID_TOKEN_REQUEST_URL") gcpProvider := os.Getenv("GOOGLE_WORKLOAD_IDENTITY_PROVIDER") - term.Debugf("ACTIONS_ID_TOKEN_REQUEST_URL=%q, GOOGLE_WORKLOAD_IDENTITY_PROVIDER=%q", githubTokenReqUrl, gcpProvider) + slog.Debug(fmt.Sprintf("ACTIONS_ID_TOKEN_REQUEST_URL=%q, GOOGLE_WORKLOAD_IDENTITY_PROVIDER=%q", githubTokenReqUrl, gcpProvider)) if githubTokenReqUrl == "" || gcpProvider == "" { return nil, "", nil } diff --git a/src/pkg/clouds/gcp/storage.go b/src/pkg/clouds/gcp/storage.go index a27ab29af..cd6a59ff5 100644 --- a/src/pkg/clouds/gcp/storage.go +++ b/src/pkg/clouds/gcp/storage.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "iter" + "log/slog" "strings" "time" @@ -12,7 +13,6 @@ import ( "cloud.google.com/go/iam/credentials/apiv1/credentialspb" "cloud.google.com/go/storage" "github.com/DefangLabs/defang/src/pkg" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/google/uuid" "google.golang.org/api/impersonate" @@ -38,7 +38,7 @@ func (gcp Gcp) EnsureBucketExists(ctx context.Context, prefix string, versioning return "", fmt.Errorf("failed to get bucket with prefix %q: %w", prefix, err) } if existing != "" { - term.Debugf("Bucket %q already exists\n", existing) + slog.Debug(fmt.Sprintf("Bucket %q already exists\n", existing)) err := gcp.UpdateBucketVersioning(ctx, existing, versioning) if err != nil { return "", fmt.Errorf("failed to ensure versioning is enabled on existing bucket %q: %w", existing, err) @@ -53,7 +53,7 @@ func (gcp Gcp) EnsureBucketExists(ctx context.Context, 
prefix string, versioning defer client.Close() newBucketName := fmt.Sprintf("%s-%s", prefix, pkg.RandomID()) - term.Infof("Creating defang cd bucket %q", newBucketName) + slog.Info(fmt.Sprintf("Creating defang cd bucket %q", newBucketName)) bucket := client.Bucket(newBucketName) if err := bucket.Create(ctx, gcp.ProjectId, &storage.BucketAttrs{ diff --git a/src/pkg/debug/debug_test.go b/src/pkg/debug/debug_test.go index 9abf216cf..ef2bb0972 100644 --- a/src/pkg/debug/debug_test.go +++ b/src/pkg/debug/debug_test.go @@ -3,6 +3,7 @@ package debug import ( "context" "fmt" + "log/slog" "os" "testing" "time" @@ -10,7 +11,6 @@ import ( "github.com/AlecAivazis/survey/v2" "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/cli/compose" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" ) @@ -164,7 +164,7 @@ func TestDebugComposeLoadError(t *testing.T) { _, loadErr := loader.LoadProject(ctx) if loadErr != nil { - term.Error("Cannot load project:", loadErr) + slog.Error(fmt.Sprintln("Cannot load project:", loadErr)) project, err := loader.CreateProjectForDebug() assert.NoError(t, err, "CreateProjectForDebug should not return an error") diff --git a/src/pkg/dockerhub/dockerhub.go b/src/pkg/dockerhub/dockerhub.go index 16da979d1..e044b6c08 100644 --- a/src/pkg/dockerhub/dockerhub.go +++ b/src/pkg/dockerhub/dockerhub.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" "io" + "log/slog" "net/http" "path" "slices" @@ -15,7 +16,6 @@ import ( "time" "github.com/DefangLabs/defang/src/pkg" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/docker/cli/cli/config" ) @@ -79,7 +79,7 @@ func GenerateNewPublicOnlyPAT(ctx context.Context, label string) (string, string } pat, err = docHubClient.CreatePAT(ctx, label, []string{"repo:public_read"}) if err != nil { - term.Infof("Failed to create Docker Hub PAT, fallback to existing docker credentials: %v", err) + slog.Info(fmt.Sprintf("Failed to 
create Docker Hub PAT, fallback to existing docker credentials: %v", err)) // Fallback to use the password as PAT pat = password } diff --git a/src/pkg/github/version.go b/src/pkg/github/version.go index 45b93c633..7b338643d 100644 --- a/src/pkg/github/version.go +++ b/src/pkg/github/version.go @@ -4,10 +4,10 @@ import ( "context" "encoding/json" "fmt" + "log/slog" "os" "github.com/DefangLabs/defang/src/pkg/http" - "github.com/DefangLabs/defang/src/pkg/term" ) const latestUrl = "https://api.github.com/repos/DefangLabs/defang/releases/latest" @@ -35,12 +35,12 @@ func GetLatestReleaseTag(ctx context.Context) (string, error) { } defer resp.Body.Close() if resp.StatusCode != 200 { - term.Debug(resp.Header) + slog.Debug(fmt.Sprintf("%v", resp.Header)) // The primary rate limit for unauthenticated requests is 60 requests per hour, per IP. // The API returns a 403 status code when the rate limit is exceeded. githubError := githubError{Message: resp.Status} if err := json.NewDecoder(resp.Body).Decode(&githubError); err != nil { - term.Debugf("Failed to decode GitHub response: %v", err) + slog.Debug(fmt.Sprintf("Failed to decode GitHub response: %v", err)) } return "", fmt.Errorf("error fetching release info from GitHub: %s", githubError.Message) } diff --git a/src/pkg/login/agree_tos.go b/src/pkg/login/agree_tos.go index dc8586244..64734fc5e 100644 --- a/src/pkg/login/agree_tos.go +++ b/src/pkg/login/agree_tos.go @@ -3,6 +3,8 @@ package login import ( "context" "errors" + "fmt" + "log/slog" "github.com/AlecAivazis/survey/v2" "github.com/DefangLabs/defang/src/pkg/cli/client" @@ -19,7 +21,7 @@ func InteractiveAgreeToS(ctx context.Context, fabric client.FabricClient) error if client.TermsAccepted() { // The user has already agreed to the terms of service recently if err := nonInteractiveAgreeToS(ctx, fabric); err != nil { - term.Debug("unable to agree to terms:", err) // not fatal + slog.Debug(fmt.Sprintln("unable to agree to terms:", err)) // not fatal } return nil } @@ 
-50,7 +52,7 @@ func NonInteractiveAgreeToS(ctx context.Context, fabric client.FabricClient) err // Persist the terms agreement in the state file so that we don't ask again if err := client.AcceptTerms(); err != nil { - term.Debug("unable to persist terms agreement:", err) // not fatal + slog.Debug(fmt.Sprintln("unable to persist terms agreement:", err)) // not fatal } return nonInteractiveAgreeToS(ctx, fabric) @@ -60,6 +62,6 @@ func nonInteractiveAgreeToS(ctx context.Context, fabric client.FabricClient) err if err := fabric.AgreeToS(ctx); err != nil { return err } - term.Info("You have agreed to the Defang terms of service") + slog.Info("You have agreed to the Defang terms of service") return nil } diff --git a/src/pkg/login/login.go b/src/pkg/login/login.go index 25dc1700d..326131aa9 100644 --- a/src/pkg/login/login.go +++ b/src/pkg/login/login.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "os" "path/filepath" @@ -27,7 +28,7 @@ type AuthService interface { type OpenAuthService struct{} func (OpenAuthService) login(ctx context.Context, fabricAddr string, flow LoginFlow, mcpClient string) (string, error) { - term.Debug("Logging in to", fabricAddr) + slog.Debug(fmt.Sprintln("Logging in to", fabricAddr)) code, err := auth.StartAuthCodeFlow(ctx, flow, func(token string) { client.SaveAccessToken(fabricAddr, token) @@ -60,7 +61,7 @@ func interactiveLogin(ctx context.Context, fabricAddr string, flow LoginFlow, mc } if err := client.SaveAccessToken(fabricAddr, token); err != nil { - term.Warn(err) + slog.Warn(fmt.Sprintf("%v", err)) var pathError *os.PathError if errors.As(err, &pathError) { term.Printf("\nTo fix file permissions, run:\n\n sudo chown -R $(whoami) %q\n", pathError.Path) @@ -72,12 +73,12 @@ func interactiveLogin(ctx context.Context, fabricAddr string, flow LoginFlow, mc } func NonInteractiveGitHubLogin(ctx context.Context, fabric client.FabricClient, fabricAddr string) error { - term.Debug("Non-interactive login using GitHub Actions 
id-token") + slog.Debug("Non-interactive login using GitHub Actions id-token") idToken, err := github.GetIdToken(ctx, "") // default audience (ie. https://github.com/ORG) if err != nil { return fmt.Errorf("non-interactive login failed: %w", err) } - term.Debug("Got GitHub Actions id-token") + slog.Debug("Got GitHub Actions id-token") // Create a Fabric token using the GitHub token as an assertion resp, err := fabric.Token(ctx, &defangv1.TokenRequest{ @@ -103,7 +104,7 @@ func NonInteractiveGitHubLogin(ctx context.Context, fabric client.FabricClient, os.Setenv("AWS_WEB_IDENTITY_TOKEN_FILE", jwtPath) os.Setenv("AWS_ROLE_SESSION_NAME", "defang-cli") // TODO: from WhoAmI } else { - term.Debugf("AWS_WEB_IDENTITY_TOKEN_FILE is already set; not writing token to a new file") + slog.Debug("AWS_WEB_IDENTITY_TOKEN_FILE is already set; not writing token to a new file") } return err @@ -111,7 +112,7 @@ func NonInteractiveGitHubLogin(ctx context.Context, fabric client.FabricClient, func writeWebIdentityToken(fabricAddr, token string) (string, error) { jwtPath, _ := client.GetWebIdentityTokenFile(fabricAddr) - term.Debugf("writing web identity token to %s", jwtPath) + slog.Debug("writing web identity token to " + jwtPath) dir, _ := filepath.Split(jwtPath) if err := os.MkdirAll(dir, 0700); err != nil { return "", fmt.Errorf("failed to create web identity token directory: %w", err) @@ -129,8 +130,8 @@ func InteractiveRequireLoginAndToS(ctx context.Context, fabric client.FabricClie if err = fabric.CheckLoginAndToS(ctx); err != nil { // Login interactively now; only do this for authorization-related errors if connect.CodeOf(err) == connect.CodeUnauthenticated { - term.Debug("Server error:", err) - term.Warn("Please log in to continue.") + slog.Debug(fmt.Sprintln("Server error:", err)) + slog.Warn("Please log in to continue.") term.ResetWarnings() // clear any previous warnings so we don't show them again defer func() { track.Cmd(nil, "Login", P("reason", err)) }() @@ -153,7 +154,7 @@ 
func InteractiveRequireLoginAndToS(ctx context.Context, fabric client.FabricClie // Check if the user has agreed to the terms of service and show a prompt if needed if connect.CodeOf(err) == connect.CodeFailedPrecondition { - term.Warn(client.PrettyError(err)) + slog.Warn(fmt.Sprintf("%v", client.PrettyError(err))) defer func() { track.Cmd(nil, "Terms", P("reason", err)) }() if err = InteractiveAgreeToS(ctx, fabric); err != nil { diff --git a/src/pkg/logs/slog.go b/src/pkg/logs/slog.go index a840c47cf..f06af7682 100644 --- a/src/pkg/logs/slog.go +++ b/src/pkg/logs/slog.go @@ -4,12 +4,15 @@ import ( "context" "log/slog" "strings" + "sync" "github.com/DefangLabs/defang/src/pkg/term" ) type termHandler struct { - t *term.Term + t *term.Term + attrs string // pre-formatted persistent attrs + mu sync.Mutex } func newTermHandler(t *term.Term) *termHandler { @@ -21,43 +24,44 @@ func NewTermLogger(t *term.Term) *slog.Logger { } func (h *termHandler) Handle(ctx context.Context, r slog.Record) error { + h.mu.Lock() + defer h.mu.Unlock() + msg := r.Message - // Format attrs if any - if r.NumAttrs() > 0 { - var builder strings.Builder - builder.WriteString(msg) - opened := false - r.Attrs(func(a slog.Attr) bool { - if !opened { - builder.WriteString(" {") - opened = true - } else { - builder.WriteString(", ") - } - strVal := a.String() - if len(strVal) > 80 { - runes := []rune(strVal) - strVal = string(runes[:77]) + "..." - } - builder.WriteString(strVal) - return true - }) - builder.WriteString("}") - msg = builder.String() + + // Collect attrs from WithAttrs and from this record + var sb strings.Builder + if h.attrs != "" { + sb.WriteString(h.attrs) + } + r.Attrs(func(a slog.Attr) bool { + if sb.Len() > 0 { + sb.WriteString(", ") + } + strVal := a.String() + if len(strVal) > 80 { + runes := []rune(strVal) + strVal = string(runes[:77]) + "..." 
+ } + sb.WriteString(strVal) + return true + }) + if sb.Len() > 0 { + msg = msg + " {" + sb.String() + "}" } switch r.Level { case slog.LevelDebug: - _, err := h.t.Debug(msg) + _, err := h.t.WriteDebug(msg) return err case slog.LevelInfo: - _, err := h.t.Info(msg) + _, err := h.t.WriteInfo(msg) return err case slog.LevelWarn: - _, err := h.t.Warn(msg) + _, err := h.t.WriteWarn(msg) return err case slog.LevelError: - _, err := h.t.Error(msg) + _, err := h.t.WriteError(msg) return err default: _, err := h.t.Println(msg) @@ -73,11 +77,22 @@ func (h *termHandler) Enabled(ctx context.Context, level slog.Level) bool { } func (h *termHandler) WithAttrs(attrs []slog.Attr) slog.Handler { - // Since we format attributes in Handle(), we can just return self - return h + var sb strings.Builder + sb.WriteString(h.attrs) + for _, a := range attrs { + if sb.Len() > 0 { + sb.WriteString(", ") + } + strVal := a.String() + if len(strVal) > 80 { + runes := []rune(strVal) + strVal = string(runes[:77]) + "..." 
+ } + sb.WriteString(strVal) + } + return &termHandler{t: h.t, attrs: sb.String()} } func (h *termHandler) WithGroup(name string) slog.Handler { - // Groups are not supported in this implementation return h } diff --git a/src/pkg/mcp/mcp_server.go b/src/pkg/mcp/mcp_server.go index 1819aaeae..6b3481c1a 100644 --- a/src/pkg/mcp/mcp_server.go +++ b/src/pkg/mcp/mcp_server.go @@ -3,6 +3,7 @@ package mcp import ( "context" "fmt" + "log/slog" "github.com/DefangLabs/defang/src/pkg/agent/common" agentTools "github.com/DefangLabs/defang/src/pkg/agent/tools" @@ -10,7 +11,6 @@ import ( "github.com/DefangLabs/defang/src/pkg/elicitations" "github.com/DefangLabs/defang/src/pkg/mcp/resources" "github.com/DefangLabs/defang/src/pkg/mcp/tools" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/track" "github.com/mark3labs/mcp-go/mcp" "github.com/mark3labs/mcp-go/server" @@ -30,13 +30,13 @@ type ToolTracker struct { func (t *ToolTracker) TrackTool(name string, handler server.ToolHandlerFunc) server.ToolHandlerFunc { return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { name := request.Params.Name - term.Debug("MCP Tool Called: " + name + " with params: " + fmt.Sprintf("%+v", request.Params)) + slog.Debug("MCP Tool Called: " + name + " with params: " + fmt.Sprintf("%+v", request.Params)) track.Evt("MCP Tool Called", track.P("tool", name), track.P("client", t.client), track.P("cluster", t.fabricAddr), track.P("provider", *t.providerId)) resp, err := handler(ctx, request) if err != nil { - term.Error("MCP Tool Failed: "+name, "error", err) + slog.Error(fmt.Sprintln("MCP Tool Failed: "+name, "error", err)) } else { - term.Debug("MCP Tool Succeeded: " + name) + slog.Debug("MCP Tool Succeeded: " + name) } track.Evt("MCP Tool Done", track.P("tool", name), track.P("client", t.client), track.P("cluster", t.fabricAddr), track.P("provider", *t.providerId), track.P("error", err)) return resp, err diff --git 
a/src/pkg/mcp/resources/resources.go b/src/pkg/mcp/resources/resources.go index 75b5a762d..e141eb226 100644 --- a/src/pkg/mcp/resources/resources.go +++ b/src/pkg/mcp/resources/resources.go @@ -3,11 +3,11 @@ package resources import ( "context" "fmt" + "log/slog" "os" "path/filepath" "github.com/DefangLabs/defang/src/pkg/cli/client" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/mark3labs/mcp-go/mcp" "github.com/mark3labs/mcp-go/server" ) @@ -26,7 +26,7 @@ var samplesExamplesPath = filepath.Join(client.StateDir, "samples_examples.json" // setupDocumentationResource configures and adds the documentation resource to the MCP server func setupDocumentationResource(s *server.MCPServer) { - term.Info("Creating documentation resource") + slog.Info("Creating documentation resource") docResource := mcp.NewResource( "doc:///knowledge_base/knowledge_base.json", "knowledge_base", @@ -38,7 +38,7 @@ func setupDocumentationResource(s *server.MCPServer) { // Read the file file, err := os.ReadFile(knowledgeBasePath) if err != nil { - term.Error("Failed to read resource file", "error", err, "path", "knowledge_base.json") + slog.Error(fmt.Sprintln("Failed to read resource file", "error", err, "path", "knowledge_base.json")) return nil, fmt.Errorf("failed to read resource file knowledge_base.json: %w", err) } @@ -55,7 +55,7 @@ func setupDocumentationResource(s *server.MCPServer) { // setupSamplesResource configures and adds the samples examples resource to the MCP server func setupSamplesResource(s *server.MCPServer) { - term.Info("Creating samples examples resource") + slog.Info("Creating samples examples resource") samplesResource := mcp.NewResource( "doc:///knowledge_base/samples_examples.json", "defang_dockerfile_and_compose_examples", @@ -68,7 +68,7 @@ func setupSamplesResource(s *server.MCPServer) { // Read the file file, err := os.ReadFile(samplesExamplesPath) if err != nil { - term.Error("Failed to read resource file", "error", err, "path", "samples_examples.json") 
+ slog.Error(fmt.Sprintln("Failed to read resource file", "error", err, "path", "samples_examples.json")) return nil, fmt.Errorf("failed to read resource file samples_examples.json: %w", err) } diff --git a/src/pkg/mcp/setup.go b/src/pkg/mcp/setup.go index 176271240..2bf24f731 100644 --- a/src/pkg/mcp/setup.go +++ b/src/pkg/mcp/setup.go @@ -3,6 +3,7 @@ package mcp import ( "encoding/json" "fmt" + "log/slog" "os" "path/filepath" "runtime" @@ -10,10 +11,8 @@ import ( "strings" "github.com/AlecAivazis/survey/v2" - "github.com/pelletier/go-toml/v2" - - "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/track" + "github.com/pelletier/go-toml/v2" ) // MCPServerConfig represents the configuration for an MCP server @@ -344,7 +343,7 @@ func SetupClient(clientStr string) error { return err } - term.Infof("Updating %q\n", configPath) + slog.Info(fmt.Sprintf("Updating %q\n", configPath)) // Create the directory if it doesn't exist configDir := filepath.Dir(configPath) @@ -357,7 +356,7 @@ func SetupClient(clientStr string) error { return fmt.Errorf("failed to update MCP config file for client %q: %w", client, err) } - term.Infof("Ensure %s is upgraded to the latest version and restarted for MCP settings to take effect.\n", client) + slog.Info(fmt.Sprintf("Ensure %s is upgraded to the latest version and restarted for MCP settings to take effect.\n", client)) return nil } diff --git a/src/pkg/mcp/utils.go b/src/pkg/mcp/utils.go index 2f68c063d..6cf6c87ff 100644 --- a/src/pkg/mcp/utils.go +++ b/src/pkg/mcp/utils.go @@ -3,11 +3,11 @@ package mcp import ( "fmt" "io" + "log/slog" "net/http" "os" "github.com/DefangLabs/defang/src/pkg/cli/client" - "github.com/DefangLabs/defang/src/pkg/term" ) const DocumentationEndpoint = "data" @@ -20,60 +20,60 @@ var KnowledgeBaseDir = client.StateDir var knowledgeBaseFilenames = [...]string{"knowledge_base.json", "samples_examples.json"} func SetupKnowledgeBase() error { - term.Debug("Setting up knowledge base") - 
term.Debugf("Attempting to download knowledge base files: %v", knowledgeBaseFilenames) + slog.Debug("Setting up knowledge base") + slog.Debug(fmt.Sprintf("Attempting to download knowledge base files: %v", knowledgeBaseFilenames)) // Create knowledge base directory if it doesn't exist - term.Debugf("Creating knowledge base directory: %s", KnowledgeBaseDir) + slog.Debug("Creating knowledge base directory: " + KnowledgeBaseDir) if err := os.MkdirAll(KnowledgeBaseDir, 0700); err != nil { - term.Error("Failed to create knowledge base directory", "error", err) + slog.Error(fmt.Sprintln("Failed to create knowledge base directory", "error", err)) return err } for _, filename := range knowledgeBaseFilenames { - term.Debugf("Downloading knowledge base file: %s", filename) + slog.Debug("Downloading knowledge base file: " + filename) err := downloadKnowledgeBase(KnowledgeBaseDir+"/"+filename, "/"+DocumentationEndpoint+"/"+filename) if err != nil { - term.Error("Failed to download knowledge base file", "error", err, "filename", filename) + slog.Error(fmt.Sprintln("Failed to download knowledge base file", "error", err, "filename", filename)) return err } } - term.Debug("Successfully downloaded knowledge base files") + slog.Debug("Successfully downloaded knowledge base files") return nil } func downloadKnowledgeBase(filepath string, path string) (err error) { // Create the file out, err := os.Create(filepath) - term.Debugf("Creating file: %s", filepath) + slog.Debug("Creating file: " + filepath) if err != nil { - term.Error("Failed to create file", "error", err, "filepath", filepath) + slog.Error(fmt.Sprintln("Failed to create file", "error", err, "filepath", filepath)) return err } defer out.Close() // Get the data resp, err := http.Get(AskDefangBaseURL + path) - term.Debugf("Downloading file: %s", path) + slog.Debug("Downloading file: " + path) if err != nil { - term.Error("Failed to download file", "error", err, "url", path) + slog.Error(fmt.Sprintln("Failed to download file", 
"error", err, "url", path)) return err } defer resp.Body.Close() // Check server response - term.Debugf("Checking server response: %s", resp.Status) + slog.Debug("Checking server response: " + resp.Status) if resp.StatusCode != http.StatusOK { - term.Error("Failed to download file", "error", fmt.Errorf("bad status: %s", resp.Status), "url", path) + slog.Error(fmt.Sprintln("Failed to download file", "error", fmt.Errorf("bad status: %s", resp.Status), "url", path)) return fmt.Errorf("bad status: %s", resp.Status) } // Writer the body to file - term.Debugf("Copying Using IO Copy: %s", filepath) + slog.Debug("Copying Using IO Copy: " + filepath) _, err = io.Copy(out, resp.Body) if err != nil { - term.Error("Failed to write file", "error", err, "filepath", filepath) + slog.Error(fmt.Sprintln("Failed to write file", "error", err, "filepath", filepath)) return err } diff --git a/src/pkg/migrate/heroku.go b/src/pkg/migrate/heroku.go index 5b1f33d56..4fceb8230 100644 --- a/src/pkg/migrate/heroku.go +++ b/src/pkg/migrate/heroku.go @@ -6,6 +6,7 @@ import ( "encoding/json" "fmt" "io" + "log/slog" "net/http" "os" "os/exec" @@ -14,7 +15,6 @@ import ( "github.com/AlecAivazis/survey/v2" ourHttp "github.com/DefangLabs/defang/src/pkg/http" "github.com/DefangLabs/defang/src/pkg/surveyor" - "github.com/DefangLabs/defang/src/pkg/term" ) type HerokuApplicationInfo struct { @@ -29,14 +29,14 @@ type HerokuApplicationInfo struct { func collectHerokuApplicationInfo(ctx context.Context, client HerokuClientInterface, appName string) (HerokuApplicationInfo, error) { var applicationInfo HerokuApplicationInfo - term.Info("Identifying deployed dynos") + slog.Info("Identifying deployed dynos") dynos, err := client.ListDynos(ctx, appName) if err != nil { return HerokuApplicationInfo{}, fmt.Errorf("failed to list dynos: %w", err) } applicationInfo.Dynos = dynos - term.Debugf("Dynos for the selected application: %+v\n", dynos) + slog.Debug(fmt.Sprintf("Dynos for the selected application: %+v\n", 
dynos)) dynoSizes := make(map[string]HerokuDynoSize) for _, dyno := range dynos { @@ -48,7 +48,7 @@ func collectHerokuApplicationInfo(ctx context.Context, client HerokuClientInterf } applicationInfo.DynoSizes = dynoSizes - term.Debugf("Dyno sizes for the selected application: %+v\n", dynoSizes) + slog.Debug(fmt.Sprintf("Dyno sizes for the selected application: %+v\n", dynoSizes)) releaseTasks, err := client.GetReleaseTasks(ctx, appName) if err != nil { @@ -56,15 +56,15 @@ func collectHerokuApplicationInfo(ctx context.Context, client HerokuClientInterf } applicationInfo.ReleaseTasks = releaseTasks - term.Debugf("Release tasks for the selected application: %+v\n", releaseTasks) + slog.Debug(fmt.Sprintf("Release tasks for the selected application: %+v\n", releaseTasks)) - term.Info("Identifying configured addons") + slog.Info("Identifying configured addons") addons, err := client.ListAddons(ctx, appName) if err != nil { return HerokuApplicationInfo{}, fmt.Errorf("failed to list Heroku addons: %w", err) } applicationInfo.Addons = addons - term.Debugf("Addons for the selected application: %+v\n", addons) + slog.Debug(fmt.Sprintf("Addons for the selected application: %+v\n", addons)) for _, addon := range addons { if addon.AddonService.Name == "heroku-postgresql" { @@ -76,7 +76,7 @@ func collectHerokuApplicationInfo(ctx context.Context, client HerokuClientInterf } } - term.Debugf("Postgres info for the selected application: %+v\n", applicationInfo.PGInfo) + slog.Debug(fmt.Sprintf("Postgres info for the selected application: %+v\n", applicationInfo.PGInfo)) configVars, err := client.ListConfigVars(ctx, appName) if err != nil { @@ -101,7 +101,7 @@ func selectSourceApplication(surveyor surveyor.Surveyor, appNames []string) (str if selectedApp != "" { break } - term.Warn("No application selected. Please select an application.") + slog.Warn("No application selected. 
Please select an application.") } return selectedApp, nil @@ -319,14 +319,14 @@ func authenticateHerokuCLI() error { return nil } - term.Info("You need to authenticate with the Heroku CLI.") - term.Info("If a browser window does not open, run `heroku login` in a separate shell and try again.") + slog.Info("You need to authenticate with the Heroku CLI.") + slog.Info("If a browser window does not open, run `heroku login` in a separate shell and try again.") cmd = exec.Command("heroku", "login") // cmd needs to receive any keypress on stdin in order to open a browser cmd.Stdin = bytes.NewBuffer([]byte{'\n'}) _, err = cmd.Output() if err != nil { - term.Debugf("Failed to run `heroku login`: %v", err) + slog.Debug(fmt.Sprintf("Failed to run `heroku login`: %v", err)) return err } @@ -345,22 +345,22 @@ func getHerokuAuthTokenFromCLI() (string, error) { if err != nil { return "", fmt.Errorf("Heroku CLI is not installed: %w", err) } - term.Info("The Heroku CLI is installed, we'll use it to generate a short-lived authorization token") + slog.Info("The Heroku CLI is installed, we'll use it to generate a short-lived authorization token") err = authenticateHerokuCLI() if err != nil { - term.Debugf("Failed to authenticate Heroku CLI: %v", err) + slog.Debug(fmt.Sprintf("Failed to authenticate Heroku CLI: %v", err)) return "", err } - term.Debug("Successfully authenticated with Heroku") + slog.Debug("Successfully authenticated with Heroku") cmd := exec.Command("heroku", "authorizations:create", "--expires-in=300", "--json") output, err := cmd.Output() if err != nil { - term.Debugf("Failed to run `heroku authorizations:create`: %v", err) + slog.Debug(fmt.Sprintf("Failed to run `heroku authorizations:create`: %v", err)) return "", err } - term.Debugf("received output from heroku cli: %s", output) + slog.Debug(fmt.Sprintf("received output from heroku cli: %s", output)) var result struct { AccessToken struct { @@ -369,24 +369,24 @@ func getHerokuAuthTokenFromCLI() (string, error) { } 
err = json.Unmarshal(output, &result) if err != nil || result.AccessToken.Token == "" { - term.Debugf("Failed to parse Heroku CLI output: %v", err) + slog.Debug(fmt.Sprintf("Failed to parse Heroku CLI output: %v", err)) return "", err } - term.Debug("Successfully obtained Heroku token via CLI") + slog.Debug("Successfully obtained Heroku token via CLI") return result.AccessToken.Token, nil } func getHerokuAuthToken() (string, error) { token := os.Getenv("HEROKU_API_KEY") if token != "" { - term.Debug("Using HEROKU_API_KEY environment variable") + slog.Debug("Using HEROKU_API_KEY environment variable") return token, nil } token = os.Getenv("HEROKU_AUTH_TOKEN") if token != "" { - term.Debug("Using HEROKU_AUTH_TOKEN environment variable") + slog.Debug("Using HEROKU_AUTH_TOKEN environment variable") return token, nil } @@ -395,7 +395,7 @@ func getHerokuAuthToken() (string, error) { return token, nil } - term.Debug("Prompting for Heroku auth token") + slog.Debug("Prompting for Heroku auth token") for { err := survey.AskOne(&survey.Password{ diff --git a/src/pkg/migrate/migrate.go b/src/pkg/migrate/migrate.go index b1e8802b6..0a2d096fb 100644 --- a/src/pkg/migrate/migrate.go +++ b/src/pkg/migrate/migrate.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "runtime" "slices" "strings" @@ -12,7 +13,6 @@ import ( "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/cli/compose" "github.com/DefangLabs/defang/src/pkg/surveyor" - "github.com/DefangLabs/defang/src/pkg/term" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" "go.yaml.in/yaml/v4" ) @@ -26,7 +26,7 @@ func InteractiveSetup(ctx context.Context, fabric client.FabricClient, surveyor sourcePlatform = selected } - term.Debugf("Selected source platform: %s", sourcePlatform) + slog.Debug(fmt.Sprintf("Selected source platform: %s", sourcePlatform)) var composeFileContents string var err error @@ -56,7 +56,7 @@ func setupFromHeroku(ctx context.Context, 
fabric client.FabricClient, surveyor s // Here you can add logic to process the retrieved apps and set up the project accordingly // For now, we just print the apps - term.Debugf("Your Heroku applications: %+v\n", apps) + slog.Debug(fmt.Sprintf("Your Heroku applications: %+v\n", apps)) appNames := make([]string, len(apps)) for i, app := range apps { @@ -68,23 +68,23 @@ func setupFromHeroku(ctx context.Context, fabric client.FabricClient, surveyor s return "", fmt.Errorf("failed to select source application: %w", err) } - term.Infof("Collecting information about %q...", sourceApp) + slog.Info(fmt.Sprintf("Collecting information about %q...", sourceApp)) applicationInfo, err := collectHerokuApplicationInfo(ctx, herokuClient, sourceApp) if err != nil { return "", fmt.Errorf("failed to collect Heroku application info: %w", err) } - term.Debugf("Application info: %+v\n", applicationInfo) + slog.Debug(fmt.Sprintf("Application info: %+v\n", applicationInfo)) sanitizedApplicationInfo, err := sanitizeHerokuApplicationInfo(applicationInfo) if err != nil { return "", fmt.Errorf("failed to sanitize Heroku application info: %w", err) } - term.Debugf("Sanitized application info: %+v\n", sanitizedApplicationInfo) + slog.Debug(fmt.Sprintf("Sanitized application info: %+v\n", sanitizedApplicationInfo)) - term.Info("Generating compose file...") + slog.Info("Generating compose file...") composeFileContents, err := generateComposeFile(ctx, fabric, defangv1.SourcePlatform_SOURCE_PLATFORM_HEROKU, sourceApp, sanitizedApplicationInfo) if err != nil { @@ -129,7 +129,7 @@ func generateComposeFile(ctx context.Context, fabric client.FabricClient, platfo } responseStr := string(resp.Compose) - term.Debugf("Received compose response: %+v", responseStr) + slog.Debug(fmt.Sprintf("Received compose response: %+v", responseStr)) // assume the response is markdown, // extract the contents of the first code block if there is one @@ -140,7 +140,7 @@ func generateComposeFile(ctx context.Context, fabric 
client.FabricClient, platfo composeContent = responseStr } else { previousError = err.Error() - term.Debugf("Failed to extract code block: %v. Retrying...", err) + slog.Debug(fmt.Sprintf("Failed to extract code block: %v. Retrying...", err)) continue } } @@ -156,7 +156,7 @@ func generateComposeFile(ctx context.Context, fabric client.FabricClient, platfo _, err = compose.LoadFromContentWithInterpolation(ctx, []byte(composeContent), projectName) if err != nil { previousError = err.Error() - term.Debugf("Invalid compose file received: %v. Retrying...", err) + slog.Debug(fmt.Sprintf("Invalid compose file received: %v. Retrying...", err)) continue } diff --git a/src/pkg/migrate/platform.go b/src/pkg/migrate/platform.go index 847d65538..62fbcc6ce 100644 --- a/src/pkg/migrate/platform.go +++ b/src/pkg/migrate/platform.go @@ -2,10 +2,10 @@ package migrate import ( "fmt" + "log/slog" "github.com/AlecAivazis/survey/v2" "github.com/DefangLabs/defang/src/pkg/surveyor" - "github.com/DefangLabs/defang/src/pkg/term" ) type SourcePlatform string @@ -70,6 +70,6 @@ func selectSourcePlatform(surveyor surveyor.Surveyor) (error, SourcePlatform) { return nil, sourcePlatform } - term.Warnf("Invalid source platform selected: %s. Please try again.", selectedOption) + slog.Warn(fmt.Sprintf("Invalid source platform selected: %s. 
Please try again.", selectedOption)) } } diff --git a/src/pkg/session/session.go b/src/pkg/session/session.go index 0a2c34fae..ed396eed4 100644 --- a/src/pkg/session/session.go +++ b/src/pkg/session/session.go @@ -3,6 +3,7 @@ package session import ( "context" "fmt" + "log/slog" "github.com/DefangLabs/defang/src/pkg" "github.com/DefangLabs/defang/src/pkg/cli" @@ -11,7 +12,6 @@ import ( "github.com/DefangLabs/defang/src/pkg/clouds/aws" "github.com/DefangLabs/defang/src/pkg/modes" "github.com/DefangLabs/defang/src/pkg/stacks" - "github.com/DefangLabs/defang/src/pkg/term" ) type StacksManager interface { @@ -67,7 +67,7 @@ func (sl *SessionLoader) LoadSession(ctx context.Context) (*Session, error) { if stack.Provider == client.ProviderDefang { extraMsg = "; consider using BYOC (https://s.defang.io/byoc)" } - term.Infof("Using the %q stack on %s from %s%s", stack.Name, stack.Provider, whence, extraMsg) + slog.Info(fmt.Sprintf("Using the %q stack on %s from %s%s", stack.Name, stack.Provider, whence, extraMsg)) printProviderMismatchWarnings(ctx, stack.Provider) return session, nil @@ -106,28 +106,28 @@ func printProviderMismatchWarnings(ctx context.Context, provider client.Provider // Ignore any env vars when explicitly using the Defang playground provider // Defaults to defang provider in non-interactive mode if env := pkg.AwsInEnv(); env != "" { - term.Warnf("AWS environment variables were detected (%v); did you forget --provider=aws or DEFANG_PROVIDER=aws?", env) + slog.Warn(fmt.Sprintf("AWS environment variables were detected (%v); did you forget --provider=aws or DEFANG_PROVIDER=aws?", env)) } if env := pkg.DoInEnv(); env != "" { - term.Warnf("DigitalOcean environment variable was detected (%v); did you forget --provider=digitalocean or DEFANG_PROVIDER=digitalocean?", env) + slog.Warn(fmt.Sprintf("DigitalOcean environment variable was detected (%v); did you forget --provider=digitalocean or DEFANG_PROVIDER=digitalocean?", env)) } if env := pkg.GcpInEnv(); env != "" { 
- term.Warnf("GCP project environment variable was detected (%v); did you forget --provider=gcp or DEFANG_PROVIDER=gcp?", env) + slog.Warn(fmt.Sprintf("GCP project environment variable was detected (%v); did you forget --provider=gcp or DEFANG_PROVIDER=gcp?", env)) } } switch provider { case client.ProviderAWS: if !awsInConfig(ctx) { - term.Warn("AWS provider was selected, but AWS environment is not set") + slog.Warn("AWS provider was selected, but AWS environment is not set") } case client.ProviderDO: if env := pkg.DoInEnv(); env == "" { - term.Warn("DigitalOcean provider was selected, but DIGITALOCEAN_TOKEN environment variable is not set") + slog.Warn("DigitalOcean provider was selected, but DIGITALOCEAN_TOKEN environment variable is not set") } case client.ProviderGCP: if env := pkg.GcpInEnv(); env == "" { - term.Warnf("GCP provider was selected, but no GCP project environment variable is set (%v)", pkg.GCPProjectEnvVars) + slog.Warn(fmt.Sprintf("GCP provider was selected, but no GCP project environment variable is set (%v)", pkg.GCPProjectEnvVars)) } } } diff --git a/src/pkg/setup/setup.go b/src/pkg/setup/setup.go index 53036ea1b..75f19cc34 100644 --- a/src/pkg/setup/setup.go +++ b/src/pkg/setup/setup.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "os" "strings" @@ -144,7 +145,7 @@ func (s *SetupClient) AIGenerate(ctx context.Context) (SetupResult, error) { track.Evt(GenerateStartedEvt, P("language", prompt.Language), P("description", prompt.Description), P("folder", folder), P("model", prompt.ModelID)) beforeGenerate(folder) - term.Info("Working on it. This may take 1 or 2 minutes...") + slog.Info("Working on it. 
This may take 1 or 2 minutes...") args := cli.GenerateArgs{ Description: prompt.Description, Folder: folder, @@ -180,7 +181,7 @@ func (s *SetupClient) CloneSample(ctx context.Context, sample string) (SetupResu } track.Evt(GenerateStartedEvt, P("sample", sample), P("folder", folder)) beforeGenerate(folder) - term.Info("Fetching sample from the Defang repository...") + slog.Info("Fetching sample from the Defang repository...") err = cli.InitFromSamples(ctx, folder, []string{sample}) if err != nil { return SetupResult{}, err @@ -231,7 +232,7 @@ func (s *SetupClient) MigrateFromHeroku(ctx context.Context) (SetupResult, error return SetupResult{}, err } - term.Info("Ok, let's create a compose file for your existing deployment.") + slog.Info("Ok, let's create a compose file for your existing deployment.") heroku := migrate.NewHerokuClient() composeFileContents, err := migrate.InteractiveSetup(ctx, s.Fabric, s.Surveyor, heroku, migrate.SourcePlatformHeroku) if err != nil { @@ -243,9 +244,9 @@ func (s *SetupClient) MigrateFromHeroku(ctx context.Context) (SetupResult, error return SetupResult{}, fmt.Errorf("failed to write compose file: %w", err) } - term.Info("Compose file written to", composeFilePath) - term.Info("Your application is now ready to deploy with Defang.") - term.Info("For next steps, visit https://s.defang.io/from-heroku") + slog.Info(fmt.Sprintln("Compose file written to", composeFilePath)) + slog.Info("Your application is now ready to deploy with Defang.") + slog.Info("For next steps, visit https://s.defang.io/from-heroku") return SetupResult{Folder: "."}, nil } diff --git a/src/pkg/stacks/manager.go b/src/pkg/stacks/manager.go index ce3d00f57..7cd9c5f40 100644 --- a/src/pkg/stacks/manager.go +++ b/src/pkg/stacks/manager.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "os" "path/filepath" "slices" @@ -14,7 +15,6 @@ import ( "github.com/DefangLabs/defang/src/pkg/cli/client" "github.com/DefangLabs/defang/src/pkg/elicitations" 
"github.com/DefangLabs/defang/src/pkg/modes" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/DefangLabs/defang/src/pkg/timeutils" defangv1 "github.com/DefangLabs/defang/src/protos/io/defang/v1" ) @@ -108,7 +108,7 @@ func (sm *manager) ListRemote(ctx context.Context) ([]ListItem, error) { bytes := stack.GetStackFile() params, err := NewParametersFromContent(name, bytes) if err != nil { - term.Warnf("Skipping invalid remote stack %s: %v\n", name, err) + slog.Warn(fmt.Sprintf("Skipping invalid remote stack %s: %v\n", name, err)) continue } // fill in missing fields from remote stack info @@ -150,7 +150,7 @@ func (sm *manager) Load(ctx context.Context, name string) (*Parameters, error) { params, err := sm.LoadLocal(name) if err != nil { if errors.Is(err, os.ErrNotExist) { - term.Infof("stack file not found, attempting to import from previous deployments: %v", err) + slog.Info(fmt.Sprintf("stack file not found, attempting to import from previous deployments: %v", err)) return sm.GetRemote(ctx, name) } return nil, err @@ -283,7 +283,7 @@ func (sm *manager) getSpecifiedStack(ctx context.Context, name string) (*Paramet return nil, "", fmt.Errorf("failed to save imported stack %q to local directory: %w", name, err) } if stackFilename != "" { - term.Infof("Stack %q loaded and saved to %q. Add this file to source control.", name, stackFilename) + slog.Info(fmt.Sprintf("Stack %q loaded and saved to %q. 
Add this file to source control.", name, stackFilename)) } return stack, whence + " and previous deployment", nil } @@ -324,7 +324,7 @@ func (sm *manager) getDefaultStack(ctx context.Context) (*Parameters, string, er return nil, whence, fmt.Errorf("using default stack %q for project %q, but the stack specifies COMPOSE_PROJECT_NAME=%q", res.Stack.Name, sm.projectName, pn) } if cf, ok := params.Variables["COMPOSE_FILE"]; ok { - term.Warnf("Using default stack %q for project %q, but the stack specifies COMPOSE_FILE=%q", res.Stack.Name, sm.projectName, cf) + slog.Warn(fmt.Sprintf("Using default stack %q for project %q, but the stack specifies COMPOSE_FILE=%q", res.Stack.Name, sm.projectName, cf)) } return params, whence, nil } diff --git a/src/pkg/stacks/stacks.go b/src/pkg/stacks/stacks.go index a01afb2f3..8bc9c68c5 100644 --- a/src/pkg/stacks/stacks.go +++ b/src/pkg/stacks/stacks.go @@ -3,6 +3,7 @@ package stacks import ( "errors" "fmt" + "log/slog" "os" "path/filepath" "regexp" @@ -180,13 +181,13 @@ func ListInDirectory(workingDirectory string) ([]ListItem, error) { filename := filename(workingDirectory, file.Name()) content, err := os.ReadFile(filename) if err != nil { - term.Warnf("Skipping unreadable stack file %s: %v\n", filename, err) + slog.Warn(fmt.Sprintf("Skipping unreadable stack file %s: %v\n", filename, err)) continue } params, err := NewParametersFromContent(file.Name(), content) if err != nil { - term.Warnf("Skipping invalid stack file %s: %v\n", filename, err) + slog.Warn(fmt.Sprintf("Skipping invalid stack file %s: %v\n", filename, err)) continue } stacks = append(stacks, ListItem{ @@ -245,7 +246,7 @@ func LoadStackEnv(params Parameters, overload bool) error { paramsMap := params.ToMap() for key, value := range paramsMap { if envValue, ok := currentEnv[key]; ok && envValue != value && !overload { - term.Warnf("The variable %q is set in both the stack and the environment. 
The value from the environment will be used.\n", key) + slog.Warn(fmt.Sprintf("The variable %q is set in both the stack and the environment. The value from the environment will be used.\n", key)) } if _, ok := currentEnv[key]; !ok || overload { err := os.Setenv(key, value) @@ -263,7 +264,7 @@ func filename(workingDirectory, stackname string) string { } func PrintCreateMessage(stackName string) { - term.Infof("A stack file has been created at `.defang/%s`.", stackName) + slog.Info(fmt.Sprintf("A stack file has been created at `.defang/%s`.", stackName)) term.Printf( "This file contains the configuration for this stack.\n"+ "We recommend you commit this file to source control, so it can be used by everyone on your team.\n"+ diff --git a/src/pkg/term/colorizer.go b/src/pkg/term/colorizer.go index b3bea0775..aa573c4f7 100644 --- a/src/pkg/term/colorizer.go +++ b/src/pkg/term/colorizer.go @@ -261,6 +261,31 @@ func (t *Term) Errorf(format string, v ...any) (int, error) { return output(t.err, ErrorColor, line) } +// WriteDebug writes a pre-formatted debug message (newline added if missing). +func (t *Term) WriteDebug(msg string) (int, error) { + if !t.DoDebug() { + return 0, nil + } + return output(t.err, DebugColor, ensurePrefix(debugPrefix, ensureNewline(msg))) +} + +// WriteInfo writes a pre-formatted info message (newline added if missing). +func (t *Term) WriteInfo(msg string) (int, error) { + return output(t.outOrErr(), InfoColor, ensurePrefix(infoPrefix, ensureNewline(msg))) +} + +// WriteWarn writes a pre-formatted warning message (newline added if missing) and accumulates it. +func (t *Term) WriteWarn(msg string) (int, error) { + msg = ensurePrefix(warnPrefix, ensureNewline(msg)) + t.warnings = append(t.warnings, msg) + return output(t.outOrErr(), WarnColor, msg) +} + +// WriteError writes a pre-formatted error message (newline added if missing). 
+func (t *Term) WriteError(msg string) (int, error) { + return output(t.err, ErrorColor, ensureNewline(msg)) +} + // Deprecated: use proper error handling instead func (t *Term) Fatal(msg any) { Error("Error:", msg) diff --git a/src/pkg/tokenstore/store.go b/src/pkg/tokenstore/store.go index 571f93870..af4aaa2a4 100644 --- a/src/pkg/tokenstore/store.go +++ b/src/pkg/tokenstore/store.go @@ -3,12 +3,11 @@ package tokenstore import ( "errors" "fmt" + "log/slog" "os" "path/filepath" "strings" "sync" - - "github.com/DefangLabs/defang/src/pkg/term" ) type TokenStore interface { @@ -33,7 +32,7 @@ func (s *LocalDirTokenStore) Save(key string, token string) error { return err } - term.Debug("Saving access token to", tokenFile) + slog.Debug(fmt.Sprintln("Saving access token to", tokenFile)) dir, _ := filepath.Split(tokenFile) if err := os.MkdirAll(dir, 0700); err != nil { return fmt.Errorf("failed to create token directory: %w", err) @@ -51,7 +50,7 @@ func (s *LocalDirTokenStore) Load(key string) (string, error) { if err != nil { return "", err } - term.Debug("Reading access token from file", tokenFile) + slog.Debug(fmt.Sprintln("Reading access token from file", tokenFile)) all, err := os.ReadFile(tokenFile) if err != nil { return "", fmt.Errorf("failed to read token: %w", err) @@ -80,7 +79,7 @@ func (s *LocalDirTokenStore) List(prefix string) ([]string, error) { return nil, fmt.Errorf("failed to resolve token store directory: %w", err) } if !strings.HasPrefix(dir, baseDir) { - term.Warnf("Invalid token prefix %q: resolved directory %q is outside of token store base directory %q", prefix, dir, baseDir) + slog.Warn(fmt.Sprintf("Invalid token prefix %q: resolved directory %q is outside of token store base directory %q", prefix, dir, baseDir)) return nil, errors.New("invalid token prefix") } @@ -110,7 +109,7 @@ func (s *LocalDirTokenStore) Delete(key string) error { if err := os.Remove(tokenFile); err != nil && !errors.Is(err, os.ErrNotExist) { return fmt.Errorf("failed to 
delete token: %w", err) } - term.Debug("Removed token file:", tokenFile) + slog.Debug(fmt.Sprintln("Removed token file:", tokenFile)) return nil } diff --git a/src/pkg/track/track.go b/src/pkg/track/track.go index 088cc21fe..c1efde73f 100644 --- a/src/pkg/track/track.go +++ b/src/pkg/track/track.go @@ -1,12 +1,13 @@ package track import ( + "fmt" + "log/slog" "strings" "sync" "github.com/DefangLabs/defang/src/pkg" "github.com/DefangLabs/defang/src/pkg/cli/client" - "github.com/DefangLabs/defang/src/pkg/term" "github.com/spf13/cobra" "github.com/spf13/pflag" ) @@ -40,10 +41,10 @@ func Evt(name string, props ...Property) { } tracker := Tracker if tracker == nil { - term.Debugf("untracked event %q: %v", name, props) + slog.Debug(fmt.Sprintf("untracked event %q: %v", name, props)) return } - term.Debugf("tracking event %q: %v", name, props) + slog.Debug(fmt.Sprintf("tracking event %q: %v", name, props)) trackWG.Add(1) go func() { defer trackWG.Done() From 9f5827a9001b42c811007ed273c25a199911cbd4 Mon Sep 17 00:00:00 2001 From: jordanstephens Date: Mon, 20 Apr 2026 10:45:53 -0700 Subject: [PATCH 2/7] fix: address coderabbit review comments on slog migration MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Fix %s→%v format verb for []string in agent/common debug log - Move "project name loaded" log after error check in agent/tools/services - Fix fmt.Sprintln→fmt.Sprintf in cd.go, config.go, teardown_cd.go (removes trailing newlines) - Include domain and err in cert generation failure log - Fix potential panic truncating multi-byte strings in logs/slog.go - Remove raw Heroku CLI output from debug log; return explicit error for empty token - Add isWithinBase() using filepath.Rel to fix sibling-directory traversal in tokenstore - Update slog.SetDefault after term.DefaultTerm reassignment in mcp.go - Add slog.Default() save/restore cleanup in test subtests to prevent global state leakage Co-Authored-By: Claude Sonnet 4.6 --- 
src/cmd/cli/command/config.go | 4 ++-- src/cmd/cli/command/mcp.go | 2 ++ src/pkg/agent/common/common.go | 2 +- src/pkg/agent/tools/services.go | 2 +- src/pkg/cli/cd.go | 2 +- src/pkg/cli/cert.go | 2 +- src/pkg/cli/client/byoc/aws/byoc_test.go | 2 ++ src/pkg/cli/configList_test.go | 4 ++++ src/pkg/cli/configResolution_test.go | 4 ++++ src/pkg/cli/deploymentsList_test.go | 8 ++++++++ src/pkg/cli/tail_test.go | 4 ++++ src/pkg/cli/teardown_cd.go | 2 +- src/pkg/logs/slog.go | 8 ++++++-- src/pkg/migrate/heroku.go | 9 +++++++-- src/pkg/tokenstore/store.go | 14 +++++++++++--- 15 files changed, 55 insertions(+), 14 deletions(-) diff --git a/src/cmd/cli/command/config.go b/src/cmd/cli/command/config.go index 5d21e1763..a742406aa 100644 --- a/src/cmd/cli/command/config.go +++ b/src/cmd/cli/command/config.go @@ -165,9 +165,9 @@ var configSetCmd = &cobra.Command{ if err != nil { errs = append(errs, err) } else if ifNotSet && !didSet { - slog.Info(fmt.Sprintln("Config", name, "is already set; skipping due to --if-not-set flag")) + slog.Info(fmt.Sprintf("Config %s is already set; skipping due to --if-not-set flag", name)) } else { - slog.Info(fmt.Sprintln("Updated value for", name)) + slog.Info("Updated value for " + name) } } diff --git a/src/cmd/cli/command/mcp.go b/src/cmd/cli/command/mcp.go index 72b5fc889..fd34520c4 100644 --- a/src/cmd/cli/command/mcp.go +++ b/src/cmd/cli/command/mcp.go @@ -8,6 +8,7 @@ import ( "github.com/DefangLabs/defang/src/pkg/agent/tools" "github.com/DefangLabs/defang/src/pkg/cli/client" + "github.com/DefangLabs/defang/src/pkg/logs" "github.com/DefangLabs/defang/src/pkg/mcp" "github.com/DefangLabs/defang/src/pkg/term" "github.com/mark3labs/mcp-go/server" @@ -45,6 +46,7 @@ var mcpServerCmd = &cobra.Command{ defer logFile.Close() term.DefaultTerm = term.NewTerm(os.Stdin, logFile, logFile) term.SetDebug(true) + slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) } // Create a new MCP server diff --git a/src/pkg/agent/common/common.go 
b/src/pkg/agent/common/common.go index 175251cf3..ce7dcba90 100644 --- a/src/pkg/agent/common/common.go +++ b/src/pkg/agent/common/common.go @@ -48,7 +48,7 @@ func ConfigureAgentLoader(params LoaderParams) (*compose.Loader, error) { } composeFilePaths := params.ComposeFilePaths if len(composeFilePaths) > 0 { - slog.Debug(fmt.Sprintf("Compose file paths provided: %s", composeFilePaths)) + slog.Debug(fmt.Sprintf("Compose file paths provided: %v", composeFilePaths)) slog.Debug("Function invoked: compose.NewLoader") return compose.NewLoader(compose.WithPath(composeFilePaths...)), nil } diff --git a/src/pkg/agent/tools/services.go b/src/pkg/agent/tools/services.go index 335104e20..d89d9c2d0 100644 --- a/src/pkg/agent/tools/services.go +++ b/src/pkg/agent/tools/services.go @@ -44,13 +44,13 @@ func HandleServicesTool(ctx context.Context, loader client.Loader, params Servic } slog.Debug("Function invoked: cli.LoadProjectNameWithFallback") projectName, err := cli.LoadProjectNameWithFallback(ctx, loader, provider) - slog.Debug("Project name loaded: " + projectName) if err != nil { if strings.Contains(err.Error(), "no projects found") { return "no projects found on Playground", nil } return "", fmt.Errorf("failed to load project name: %w", err) } + slog.Debug("Project name loaded: " + projectName) serviceResponse, err := cli.GetServices(ctx, projectName, provider) if err != nil { diff --git a/src/pkg/cli/cd.go b/src/pkg/cli/cd.go index 5d7bec497..cc5b98e08 100644 --- a/src/pkg/cli/cd.go +++ b/src/pkg/cli/cd.go @@ -66,7 +66,7 @@ func CdCommand(ctx context.Context, projectName string, provider client.Provider StatesUrl: statesUrl, }) if err != nil { - slog.Debug(fmt.Sprintln("Failed to record deployment:", err)) + slog.Debug(fmt.Sprintf("Failed to record deployment: %v", err)) slog.Warn("Unable to update deployment history; deployment will proceed anyway.") } } diff --git a/src/pkg/cli/cert.go b/src/pkg/cli/cert.go index 76f1ddeb8..f4e2c5ca5 100644 --- a/src/pkg/cli/cert.go +++ 
b/src/pkg/cli/cert.go @@ -150,7 +150,7 @@ func generateCert(ctx context.Context, domain string, targets []string, client c } slog.Info(fmt.Sprintf("Triggering cert generation for %v", domain)) if err := triggerCertGeneration(ctx, domain); err != nil { - slog.Error("Error triggering cert generation, please try again") + slog.Error("Error triggering cert generation, please try again", "domain", domain, "err", err) return } diff --git a/src/pkg/cli/client/byoc/aws/byoc_test.go b/src/pkg/cli/client/byoc/aws/byoc_test.go index 8713138b3..933ec67be 100644 --- a/src/pkg/cli/client/byoc/aws/byoc_test.go +++ b/src/pkg/cli/client/byoc/aws/byoc_test.go @@ -361,6 +361,8 @@ aws_secret_access_key = wJalrXUtnFEMI/KDEFANG/bPxRfiCYEXAMPLEKEY for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { stdout, _ := term.SetupTestTerm(t) + prevLogger := slog.Default() + t.Cleanup(func() { slog.SetDefault(prevLogger) }) slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) if tt.configFiles { diff --git a/src/pkg/cli/configList_test.go b/src/pkg/cli/configList_test.go index 4a66cf35c..5bcb918f6 100644 --- a/src/pkg/cli/configList_test.go +++ b/src/pkg/cli/configList_test.go @@ -55,6 +55,8 @@ func TestConfigList(t *testing.T) { t.Run("no configs", func(t *testing.T) { stdout, _ := term.SetupTestTerm(t) + prevLogger := slog.Default() + t.Cleanup(func() { slog.SetDefault(prevLogger) }) slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) err := ConfigList(ctx, "emptyconfigs", &provider) @@ -72,6 +74,8 @@ func TestConfigList(t *testing.T) { t.Run("some configs", func(t *testing.T) { stdout, _ := term.SetupTestTerm(t) + prevLogger := slog.Default() + t.Cleanup(func() { slog.SetDefault(prevLogger) }) slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) err := ConfigList(ctx, "test", &provider) diff --git a/src/pkg/cli/configResolution_test.go b/src/pkg/cli/configResolution_test.go index 849d8d37d..120636b9f 100644 --- a/src/pkg/cli/configResolution_test.go +++ 
b/src/pkg/cli/configResolution_test.go @@ -16,6 +16,8 @@ import ( func TestPrintConfigResolutionSummary(t *testing.T) { testAllConfigResolutionFiles(t, "testdata/config-resolution", func(t *testing.T, name, path string) { stdout, _ := term.SetupTestTerm(t) + prevLogger := slog.Default() + t.Cleanup(func() { slog.SetDefault(prevLogger) }) slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) loader := compose.NewLoader(compose.WithPath(path)) @@ -56,6 +58,8 @@ func TestPrintConfigResolutionSummary(t *testing.T) { func TestPrintRedactedConfigResolutionSummary(t *testing.T) { testAllConfigResolutionFiles(t, "testdata/redact-config", func(t *testing.T, name, path string) { stdout, _ := term.SetupTestTerm(t) + prevLogger := slog.Default() + t.Cleanup(func() { slog.SetDefault(prevLogger) }) slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) loader := compose.NewLoader(compose.WithPath(path)) diff --git a/src/pkg/cli/deploymentsList_test.go b/src/pkg/cli/deploymentsList_test.go index 124ebd8f5..87026573f 100644 --- a/src/pkg/cli/deploymentsList_test.go +++ b/src/pkg/cli/deploymentsList_test.go @@ -64,6 +64,8 @@ func TestDeploymentsList(t *testing.T) { t.Run("no deployments", func(t *testing.T) { stdout, _ := term.SetupTestTerm(t) + prevLogger := slog.Default() + t.Cleanup(func() { slog.SetDefault(prevLogger) }) slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) err := DeploymentsList(ctx, grpcClient, ListDeploymentsParams{ ListType: defangv1.DeploymentType_DEPLOYMENT_TYPE_HISTORY, @@ -84,6 +86,8 @@ func TestDeploymentsList(t *testing.T) { t.Run("some deployments", func(t *testing.T) { stdout, _ := term.SetupTestTerm(t) + prevLogger := slog.Default() + t.Cleanup(func() { slog.SetDefault(prevLogger) }) slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) err := DeploymentsList(ctx, grpcClient, ListDeploymentsParams{ ListType: defangv1.DeploymentType_DEPLOYMENT_TYPE_HISTORY, @@ -137,6 +141,8 @@ func TestActiveDeployments(t *testing.T) { t.Run("no active deployments", 
func(t *testing.T) { fabricServer.testDeploymentsData = nil stdout, _ := term.SetupTestTerm(t) + prevLogger := slog.Default() + t.Cleanup(func() { slog.SetDefault(prevLogger) }) slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) err := DeploymentsList(ctx, grpcClient, ListDeploymentsParams{ @@ -174,6 +180,8 @@ func TestActiveDeployments(t *testing.T) { fabricServer.testDeploymentsData = activeDeployments stdout, _ := term.SetupTestTerm(t) + prevLogger := slog.Default() + t.Cleanup(func() { slog.SetDefault(prevLogger) }) slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) err := DeploymentsList(ctx, grpcClient, ListDeploymentsParams{ ListType: defangv1.DeploymentType_DEPLOYMENT_TYPE_ACTIVE, diff --git a/src/pkg/cli/tail_test.go b/src/pkg/cli/tail_test.go index 7d4bbfcd4..083be2f8e 100644 --- a/src/pkg/cli/tail_test.go +++ b/src/pkg/cli/tail_test.go @@ -123,9 +123,11 @@ func TestTail(t *testing.T) { testTerm.ForceColor(true) defaultTerm := term.DefaultTerm term.DefaultTerm = testTerm + prevLogger := slog.Default() slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) t.Cleanup(func() { term.DefaultTerm = defaultTerm + slog.SetDefault(prevLogger) }) const projectName = "project1" @@ -239,10 +241,12 @@ func setupTestTerminal() (*bytes.Buffer, *bytes.Buffer, func()) { testTerm.ForceColor(true) defaultTerm := term.DefaultTerm term.DefaultTerm = testTerm + prevLogger := slog.Default() slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) cleanup := func() { term.DefaultTerm = defaultTerm + slog.SetDefault(prevLogger) } return &stdout, &stderr, cleanup diff --git a/src/pkg/cli/teardown_cd.go b/src/pkg/cli/teardown_cd.go index 8bdee941d..291a19309 100644 --- a/src/pkg/cli/teardown_cd.go +++ b/src/pkg/cli/teardown_cd.go @@ -36,7 +36,7 @@ func TearDownCD(ctx context.Context, provider client.Provider, force bool) error if len(stacks) > 0 { slog.Info("Some stacks are currently deployed. 
Run the following commands to tear them down:") for _, stack := range stacks { - slog.Info(fmt.Sprintf(" `defang down --workspace %s --project-name %s --stack %s`\n", stack.Workspace, stack.Project, stack.Stack)) + slog.Info(fmt.Sprintf(" `defang down --workspace %s --project-name %s --stack %s`", stack.Workspace, stack.Project, stack.Stack)) } if !force { return ErrExistingStacks diff --git a/src/pkg/logs/slog.go b/src/pkg/logs/slog.go index f06af7682..4da8e0c11 100644 --- a/src/pkg/logs/slog.go +++ b/src/pkg/logs/slog.go @@ -41,7 +41,9 @@ func (h *termHandler) Handle(ctx context.Context, r slog.Record) error { strVal := a.String() if len(strVal) > 80 { runes := []rune(strVal) - strVal = string(runes[:77]) + "..." + if len(runes) > 77 { + strVal = string(runes[:77]) + "..." + } } sb.WriteString(strVal) return true @@ -86,7 +88,9 @@ func (h *termHandler) WithAttrs(attrs []slog.Attr) slog.Handler { strVal := a.String() if len(strVal) > 80 { runes := []rune(strVal) - strVal = string(runes[:77]) + "..." + if len(runes) > 77 { + strVal = string(runes[:77]) + "..." 
+ } } sb.WriteString(strVal) } diff --git a/src/pkg/migrate/heroku.go b/src/pkg/migrate/heroku.go index 4fceb8230..29d47d2c7 100644 --- a/src/pkg/migrate/heroku.go +++ b/src/pkg/migrate/heroku.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "io" "log/slog" @@ -360,7 +361,7 @@ func getHerokuAuthTokenFromCLI() (string, error) { return "", err } - slog.Debug(fmt.Sprintf("received output from heroku cli: %s", output)) + slog.Debug("Received output from heroku CLI authorization command") var result struct { AccessToken struct { @@ -368,10 +369,14 @@ func getHerokuAuthTokenFromCLI() (string, error) { } `json:"access_token"` } err = json.Unmarshal(output, &result) - if err != nil || result.AccessToken.Token == "" { + if err != nil { slog.Debug(fmt.Sprintf("Failed to parse Heroku CLI output: %v", err)) return "", err } + if result.AccessToken.Token == "" { + slog.Debug("Heroku CLI output did not include an access token") + return "", errors.New("heroku CLI returned an empty access token") + } slog.Debug("Successfully obtained Heroku token via CLI") return result.AccessToken.Token, nil diff --git a/src/pkg/tokenstore/store.go b/src/pkg/tokenstore/store.go index af4aaa2a4..fdcc639d8 100644 --- a/src/pkg/tokenstore/store.go +++ b/src/pkg/tokenstore/store.go @@ -58,6 +58,14 @@ func (s *LocalDirTokenStore) Load(key string) (string, error) { return string(all), nil } +func isWithinBase(baseDir, target string) bool { + rel, err := filepath.Rel(baseDir, target) + if err != nil { + return false + } + return rel == "." || (rel != ".." 
&& !strings.HasPrefix(rel, ".."+string(os.PathSeparator)) && !filepath.IsAbs(rel)) +} + func (s *LocalDirTokenStore) List(prefix string) ([]string, error) { s.mu.RLock() defer s.mu.RUnlock() @@ -78,7 +86,7 @@ func (s *LocalDirTokenStore) List(prefix string) ([]string, error) { if err != nil { return nil, fmt.Errorf("failed to resolve token store directory: %w", err) } - if !strings.HasPrefix(dir, baseDir) { + if !isWithinBase(baseDir, dir) { slog.Warn(fmt.Sprintf("Invalid token prefix %q: resolved directory %q is outside of token store base directory %q", prefix, dir, baseDir)) return nil, errors.New("invalid token prefix") } @@ -109,7 +117,7 @@ func (s *LocalDirTokenStore) Delete(key string) error { if err := os.Remove(tokenFile); err != nil && !errors.Is(err, os.ErrNotExist) { return fmt.Errorf("failed to delete token: %w", err) } - slog.Debug(fmt.Sprintln("Removed token file:", tokenFile)) + slog.Debug("Removed token file: " + tokenFile) return nil } @@ -129,7 +137,7 @@ func (s *LocalDirTokenStore) getTokenFile(key string) (string, error) { if err != nil { return "", fmt.Errorf("failed to resolve token store directory: %w", err) } - if !strings.HasPrefix(absTokenFilePath, absDir) { + if !isWithinBase(absDir, absTokenFilePath) { return "", errors.New("invalid token key") } return absTokenFilePath, nil From 181a26e37c8066c9c1e5cc527ab7e82b04d0320e Mon Sep 17 00:00:00 2001 From: jordanstephens Date: Mon, 20 Apr 2026 11:31:13 -0700 Subject: [PATCH 3/7] fix: migrate remaining term.Println/Printf calls to slog Converts single-line informational messages from term.Println/Printf to the appropriate slog level (Info, Warn, Debug, or Error). Data output, terminal-control operations, interactive prompts, and multi-line formatted instructions are left as term calls. 
Co-Authored-By: Claude Sonnet 4.6 --- src/cmd/cli/command/commands.go | 6 +++--- src/cmd/cli/command/compose.go | 2 +- src/cmd/cli/command/compose_test.go | 2 +- src/cmd/cli/command/mcp.go | 4 ++-- src/pkg/auth/auth.go | 2 +- src/pkg/cli/cd.go | 2 +- src/pkg/cli/compose/loader.go | 2 +- src/pkg/cli/composeUp.go | 2 +- src/pkg/cli/generate.go | 4 +--- src/pkg/cli/new.go | 3 +-- src/pkg/clouds/aws/login.go | 2 +- src/pkg/clouds/gcp/api.go | 3 +-- src/pkg/clouds/gcp/login.go | 2 +- src/pkg/debug/debug.go | 3 ++- src/pkg/login/agree_tos.go | 2 +- 15 files changed, 19 insertions(+), 22 deletions(-) diff --git a/src/cmd/cli/command/commands.go b/src/cmd/cli/command/commands.go index c125fc803..120b1301d 100644 --- a/src/cmd/cli/command/commands.go +++ b/src/cmd/cli/command/commands.go @@ -102,15 +102,15 @@ func Execute(ctx context.Context) error { } if global.HasTty && term.HadWarnings() { - term.Println("For help with warnings, check our FAQ at https://s.defang.io/warnings") + slog.Info("For help with warnings, check our FAQ at https://s.defang.io/warnings") } if global.HasTty && !global.HideUpdate && pkg.RandomIndex(10) == 0 { if latest, err := github.GetLatestReleaseTag(ctx); err == nil && isNewer(GetCurrentVersion(), latest) { slog.Debug(fmt.Sprintln("Latest Version:", latest, "Current Version:", GetCurrentVersion())) - term.Println("A newer version of the CLI is available at https://github.com/DefangLabs/defang/releases/latest") + slog.Info("A newer version of the CLI is available at https://github.com/DefangLabs/defang/releases/latest") if pkg.RandomIndex(10) == 0 && !pkg.GetenvBool("DEFANG_HIDE_HINTS") { - term.Println("To silence these notices, do: export DEFANG_HIDE_UPDATE=1") + slog.Info("To silence these notices, do: export DEFANG_HIDE_UPDATE=1") } } } diff --git a/src/cmd/cli/command/compose.go b/src/cmd/cli/command/compose.go index e9b0864a4..d3e22ce2a 100644 --- a/src/cmd/cli/command/compose.go +++ b/src/cmd/cli/command/compose.go @@ -38,7 +38,7 @@ func 
printPlaygroundPortalServiceURLs(serviceInfos []*defangv1.ServiceInfo) { if global.Stack.Provider == client.ProviderDefang && global.FabricAddr == client.DefaultFabricAddr { slog.Info("Monitor your services' status in the defang portal") for _, serviceInfo := range serviceInfos { - term.Println(" -", SERVICE_PORTAL_URL+"/"+serviceInfo.Service.Name) + slog.Info(" - " + SERVICE_PORTAL_URL + "/" + serviceInfo.Service.Name) } } } diff --git a/src/cmd/cli/command/compose_test.go b/src/cmd/cli/command/compose_test.go index c1b8c4d71..0471b79cb 100644 --- a/src/cmd/cli/command/compose_test.go +++ b/src/cmd/cli/command/compose_test.go @@ -42,7 +42,7 @@ func TestPrintPlaygroundPortalServiceURLs(t *testing.T) { Service: &defangv1.Service{Name: "service1"}, }}) const want = ` * Monitor your services' status in the defang portal - - https://portal.defang.io/service/service1 + * - https://portal.defang.io/service/service1 ` if got := stdout.String(); got != want { t.Errorf("got %q, want %q", got, want) diff --git a/src/cmd/cli/command/mcp.go b/src/cmd/cli/command/mcp.go index fd34520c4..217d4a3d9 100644 --- a/src/cmd/cli/command/mcp.go +++ b/src/cmd/cli/command/mcp.go @@ -60,12 +60,12 @@ var mcpServerCmd = &cobra.Command{ } // Start the server - term.Println("Starting Defang MCP server") + slog.Info("Starting Defang MCP server") if err := server.ServeStdio(s); err != nil { return err } - term.Println("Server shutdown") + slog.Info("Server shutdown") return nil }, diff --git a/src/pkg/auth/auth.go b/src/pkg/auth/auth.go index da13893b2..e7781e270 100644 --- a/src/pkg/auth/auth.go +++ b/src/pkg/auth/auth.go @@ -58,7 +58,7 @@ func StartAuthCodeFlow(ctx context.Context, mcpFlow LoginFlow, saveToken func(st // Create a shortened authorize URL by only including the variable parts (state and code_challenge) authorizeUrl := GetAuthorizeUrl("cli", ar.state, ar.challenge) - term.Println("Please visit the following URL to log in: (Right click the URL or press ENTER to open browser)") + 
slog.Info("Please visit the following URL to log in: (Right click the URL or press ENTER to open browser)") n, _ := term.Printf(" %s", authorizeUrl) defer term.Print("\r", strings.Repeat(" ", n), "\r") // TODO: use termenv to clear line diff --git a/src/pkg/cli/cd.go b/src/pkg/cli/cd.go index cc5b98e08..daaf6ca98 100644 --- a/src/pkg/cli/cd.go +++ b/src/pkg/cli/cd.go @@ -157,7 +157,7 @@ func CdListFromStorage(ctx context.Context, provider client.Provider, allRegions if allRegions { accountInfo.Region = "" } - term.Printf("No projects found in %v\n", accountInfo) + slog.Info(fmt.Sprintf("No projects found in %v", accountInfo)) } return term.Table(stacks, "Project", "Stack", "Workspace", "CdRegion") diff --git a/src/pkg/cli/compose/loader.go b/src/pkg/cli/compose/loader.go index 210e6f079..8f2b98c22 100644 --- a/src/pkg/cli/compose/loader.go +++ b/src/pkg/cli/compose/loader.go @@ -121,7 +121,7 @@ func (l *Loader) loadProject(ctx context.Context, suppressWarn bool) (*Project, if term.DoDebug() { b, _ := yaml.Marshal(project) - term.Println(string(b)) + slog.Debug(string(b)) } l.cached = project diff --git a/src/pkg/cli/composeUp.go b/src/pkg/cli/composeUp.go index 183c2a3c6..e5800a564 100644 --- a/src/pkg/cli/composeUp.go +++ b/src/pkg/cli/composeUp.go @@ -214,7 +214,7 @@ func ComposeUp(ctx context.Context, fabric client.FabricClient, provider client. 
} if term.DoDebug() { - term.Println("Project:", project.Name) + slog.Debug("Project: " + project.Name) for _, serviceInfo := range resp.Services { PrintObject(serviceInfo.Service.Name, serviceInfo) } diff --git a/src/pkg/cli/generate.go b/src/pkg/cli/generate.go index d33da10d6..93f852c92 100644 --- a/src/pkg/cli/generate.go +++ b/src/pkg/cli/generate.go @@ -43,8 +43,6 @@ func GenerateWithAI(ctx context.Context, client client.FabricClient, args Genera term.Printc(term.DebugColor, file.Name+"\n```") term.Printc(term.DebugColor, file.Content) term.Printc(term.DebugColor, "```") - term.Println("") - term.Println("") } } @@ -55,7 +53,7 @@ func GenerateWithAI(ctx context.Context, client client.FabricClient, args Genera } for _, file := range response.Files { // Print the files that were generated - term.Println(" -", file.Name) + slog.Info(" - " + file.Name) // TODO: this will overwrite existing files if err = os.WriteFile(filepath.Join(args.Folder, file.Name), []byte(file.Content), 0644); err != nil { return nil, err diff --git a/src/pkg/cli/new.go b/src/pkg/cli/new.go index 4c069c351..ff9b0cf1f 100644 --- a/src/pkg/cli/new.go +++ b/src/pkg/cli/new.go @@ -14,7 +14,6 @@ import ( "strings" "github.com/DefangLabs/defang/src/pkg/http" - "github.com/DefangLabs/defang/src/pkg/term" ) var ErrSampleNotFound = errors.New("sample not found") @@ -102,7 +101,7 @@ func copyFromSamples(ctx context.Context, dir string, names []string, skipExisti prefix := fmt.Sprintf("%s-%s/samples/%s/", repo, branch, name) if base, ok := strings.CutPrefix(h.Name, prefix); ok && len(base) > 0 { sampleFound = true - term.Println(" -", base) + slog.Info(" - " + base) path := filepath.Join(dir, subdir, base) if h.FileInfo().IsDir() { if err := os.MkdirAll(path, 0755); err != nil { diff --git a/src/pkg/clouds/aws/login.go b/src/pkg/clouds/aws/login.go index cfc573719..aa882dd48 100644 --- a/src/pkg/clouds/aws/login.go +++ b/src/pkg/clouds/aws/login.go @@ -366,7 +366,7 @@ func (a *Aws) 
CrossDeviceLogin(ctx context.Context) (*awsTokenCache, error) { state := rand.Text()[:16] // random state for CSRF protection authURL := auth.GetAuthorizeUrl("aws", "cross", string(a.Region), state, pkce.Challenge) - term.Println("Please visit the following URL to log in to AWS: (Right click the URL or press ENTER to open browser)") + slog.Info("Please visit the following URL to log in to AWS: (Right click the URL or press ENTER to open browser)") term.Printf(" %s\n", authURL) term.Print("Enter the authorization code displayed in your browser: ") ctx, inputCh, done := term.OpenBrowserWithInputOnEnter(ctx, authURL) diff --git a/src/pkg/clouds/gcp/api.go b/src/pkg/clouds/gcp/api.go index 623d7155e..0f80a930c 100644 --- a/src/pkg/clouds/gcp/api.go +++ b/src/pkg/clouds/gcp/api.go @@ -8,7 +8,6 @@ import ( "time" "github.com/DefangLabs/defang/src/pkg" - "github.com/DefangLabs/defang/src/pkg/term" "google.golang.org/api/googleapi" "google.golang.org/api/serviceusage/v1" ) @@ -40,7 +39,7 @@ func (gcp Gcp) EnsureAPIsEnabled(ctx context.Context, apis ...string) error { if errors.As(err, &apiErr) && (apiErr.Code == 403 || apiErr.Code == 401) { return fmt.Errorf("permission denied when enabling services: %w", err) } - term.Printf("Error: %+v (%T)\n", err, err) + slog.Error(fmt.Sprintf("Error: %+v (%T)", err, err)) if i < maxAttempts-1 { slog.Debug(fmt.Sprintf("Failed to enable services, will retry in %v: %v\n", retryInterval, err)) if err := pkg.SleepWithContext(ctx, retryInterval); err != nil { diff --git a/src/pkg/clouds/gcp/login.go b/src/pkg/clouds/gcp/login.go index c8ba00fe0..55f7f0fe1 100644 --- a/src/pkg/clouds/gcp/login.go +++ b/src/pkg/clouds/gcp/login.go @@ -327,7 +327,7 @@ func (gcp *Gcp) InteractiveLogin(ctx context.Context) (oauth2.TokenSource, error publicKeyBase64 := base64.URLEncoding.EncodeToString(pubKey[:]) authorizeURL := auth.GetAuthorizeUrl("gcp", publicKeyBase64) - term.Println("Please visit the following URL to log in to Google Cloud Platform: (Right 
click the URL or press ENTER to open browser)") + slog.Info("Please visit the following URL to log in to Google Cloud Platform: (Right click the URL or press ENTER to open browser)") term.Printf(" %s\n", authorizeURL) ctx, done := term.OpenBrowserOnEnter(ctx, authorizeURL) diff --git a/src/pkg/debug/debug.go b/src/pkg/debug/debug.go index a1f835b86..1a51032c9 100644 --- a/src/pkg/debug/debug.go +++ b/src/pkg/debug/debug.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "log/slog" "strings" "time" @@ -191,7 +192,7 @@ func buildDeploymentDebugPrompt(debugConfig DebugConfig) string { if debugConfig.Project != nil { yaml, err := compose.MarshalYAML(debugConfig.Project) if err != nil { - term.Println("Failed to marshal compose project to YAML for debug:", err) + slog.Warn("Failed to marshal compose project to YAML for debug", "err", err) } prompt += fmt.Sprintf( "The compose files are at %s. The compose file is as follows:\n\n%s", diff --git a/src/pkg/login/agree_tos.go b/src/pkg/login/agree_tos.go index 64734fc5e..891f03a6d 100644 --- a/src/pkg/login/agree_tos.go +++ b/src/pkg/login/agree_tos.go @@ -26,7 +26,7 @@ func InteractiveAgreeToS(ctx context.Context, fabric client.FabricClient) error return nil } - term.Println("Our latest terms of service can be found at https://s.defang.io/tos") + slog.Info("Our latest terms of service can be found at https://s.defang.io/tos") var agreeToS bool err := survey.AskOne(&survey.Confirm{ From 62bcc53cfa020e141ea38442a74640c9aeb9f427 Mon Sep 17 00:00:00 2001 From: jordanstephens Date: Tue, 21 Apr 2026 17:13:48 -0700 Subject: [PATCH 4/7] fix: replace slog with fmt.Println for user-facing messages originally from term.Println Co-Authored-By: Claude Sonnet 4.6 --- src/cmd/cli/command/commands.go | 6 +++--- src/cmd/cli/command/compose.go | 2 +- src/cmd/cli/command/mcp.go | 4 ++-- src/pkg/auth/auth.go | 2 +- src/pkg/cli/compose/loader.go | 2 +- src/pkg/cli/composeUp.go | 2 +- src/pkg/cli/generate.go | 5 ++++- src/pkg/cli/new.go | 2 
+- src/pkg/clouds/aws/login.go | 2 +- src/pkg/clouds/gcp/login.go | 2 +- src/pkg/debug/debug.go | 3 +-- src/pkg/login/agree_tos.go | 2 +- 12 files changed, 18 insertions(+), 16 deletions(-) diff --git a/src/cmd/cli/command/commands.go b/src/cmd/cli/command/commands.go index 120b1301d..a4b6bfece 100644 --- a/src/cmd/cli/command/commands.go +++ b/src/cmd/cli/command/commands.go @@ -102,15 +102,15 @@ func Execute(ctx context.Context) error { } if global.HasTty && term.HadWarnings() { - slog.Info("For help with warnings, check our FAQ at https://s.defang.io/warnings") + fmt.Println("For help with warnings, check our FAQ at https://s.defang.io/warnings") } if global.HasTty && !global.HideUpdate && pkg.RandomIndex(10) == 0 { if latest, err := github.GetLatestReleaseTag(ctx); err == nil && isNewer(GetCurrentVersion(), latest) { slog.Debug(fmt.Sprintln("Latest Version:", latest, "Current Version:", GetCurrentVersion())) - slog.Info("A newer version of the CLI is available at https://github.com/DefangLabs/defang/releases/latest") + fmt.Println("A newer version of the CLI is available at https://github.com/DefangLabs/defang/releases/latest") if pkg.RandomIndex(10) == 0 && !pkg.GetenvBool("DEFANG_HIDE_HINTS") { - slog.Info("To silence these notices, do: export DEFANG_HIDE_UPDATE=1") + fmt.Println("To silence these notices, do: export DEFANG_HIDE_UPDATE=1") } } } diff --git a/src/cmd/cli/command/compose.go b/src/cmd/cli/command/compose.go index d3e22ce2a..6993e3d86 100644 --- a/src/cmd/cli/command/compose.go +++ b/src/cmd/cli/command/compose.go @@ -38,7 +38,7 @@ func printPlaygroundPortalServiceURLs(serviceInfos []*defangv1.ServiceInfo) { if global.Stack.Provider == client.ProviderDefang && global.FabricAddr == client.DefaultFabricAddr { slog.Info("Monitor your services' status in the defang portal") for _, serviceInfo := range serviceInfos { - slog.Info(" - " + SERVICE_PORTAL_URL + "/" + serviceInfo.Service.Name) + fmt.Println(" -", 
SERVICE_PORTAL_URL+"/"+serviceInfo.Service.Name) } } } diff --git a/src/cmd/cli/command/mcp.go b/src/cmd/cli/command/mcp.go index 217d4a3d9..ca5446093 100644 --- a/src/cmd/cli/command/mcp.go +++ b/src/cmd/cli/command/mcp.go @@ -60,12 +60,12 @@ var mcpServerCmd = &cobra.Command{ } // Start the server - slog.Info("Starting Defang MCP server") + fmt.Println("Starting Defang MCP server") if err := server.ServeStdio(s); err != nil { return err } - slog.Info("Server shutdown") + fmt.Println("Server shutdown") return nil }, diff --git a/src/pkg/auth/auth.go b/src/pkg/auth/auth.go index e7781e270..864495325 100644 --- a/src/pkg/auth/auth.go +++ b/src/pkg/auth/auth.go @@ -58,7 +58,7 @@ func StartAuthCodeFlow(ctx context.Context, mcpFlow LoginFlow, saveToken func(st // Create a shortened authorize URL by only including the variable parts (state and code_challenge) authorizeUrl := GetAuthorizeUrl("cli", ar.state, ar.challenge) - slog.Info("Please visit the following URL to log in: (Right click the URL or press ENTER to open browser)") + fmt.Println("Please visit the following URL to log in: (Right click the URL or press ENTER to open browser)") n, _ := term.Printf(" %s", authorizeUrl) defer term.Print("\r", strings.Repeat(" ", n), "\r") // TODO: use termenv to clear line diff --git a/src/pkg/cli/compose/loader.go b/src/pkg/cli/compose/loader.go index 8f2b98c22..0f42e286d 100644 --- a/src/pkg/cli/compose/loader.go +++ b/src/pkg/cli/compose/loader.go @@ -121,7 +121,7 @@ func (l *Loader) loadProject(ctx context.Context, suppressWarn bool) (*Project, if term.DoDebug() { b, _ := yaml.Marshal(project) - slog.Debug(string(b)) + fmt.Println(string(b)) } l.cached = project diff --git a/src/pkg/cli/composeUp.go b/src/pkg/cli/composeUp.go index e5800a564..400c0b8a6 100644 --- a/src/pkg/cli/composeUp.go +++ b/src/pkg/cli/composeUp.go @@ -214,7 +214,7 @@ func ComposeUp(ctx context.Context, fabric client.FabricClient, provider client. 
} if term.DoDebug() { - slog.Debug("Project: " + project.Name) + fmt.Println("Project:", project.Name) for _, serviceInfo := range resp.Services { PrintObject(serviceInfo.Service.Name, serviceInfo) } diff --git a/src/pkg/cli/generate.go b/src/pkg/cli/generate.go index 93f852c92..34a9f4c45 100644 --- a/src/pkg/cli/generate.go +++ b/src/pkg/cli/generate.go @@ -2,6 +2,7 @@ package cli import ( "context" + "fmt" "log/slog" "os" "path/filepath" @@ -43,6 +44,8 @@ func GenerateWithAI(ctx context.Context, client client.FabricClient, args Genera term.Printc(term.DebugColor, file.Name+"\n```") term.Printc(term.DebugColor, file.Content) term.Printc(term.DebugColor, "```") + fmt.Println("") + fmt.Println("") } } @@ -53,7 +56,7 @@ func GenerateWithAI(ctx context.Context, client client.FabricClient, args Genera } for _, file := range response.Files { // Print the files that were generated - slog.Info(" - " + file.Name) + fmt.Println(" -", file.Name) // TODO: this will overwrite existing files if err = os.WriteFile(filepath.Join(args.Folder, file.Name), []byte(file.Content), 0644); err != nil { return nil, err diff --git a/src/pkg/cli/new.go b/src/pkg/cli/new.go index ff9b0cf1f..a55f77ee3 100644 --- a/src/pkg/cli/new.go +++ b/src/pkg/cli/new.go @@ -101,7 +101,7 @@ func copyFromSamples(ctx context.Context, dir string, names []string, skipExisti prefix := fmt.Sprintf("%s-%s/samples/%s/", repo, branch, name) if base, ok := strings.CutPrefix(h.Name, prefix); ok && len(base) > 0 { sampleFound = true - slog.Info(" - " + base) + fmt.Println(" -", base) path := filepath.Join(dir, subdir, base) if h.FileInfo().IsDir() { if err := os.MkdirAll(path, 0755); err != nil { diff --git a/src/pkg/clouds/aws/login.go b/src/pkg/clouds/aws/login.go index aa882dd48..5383fd2f1 100644 --- a/src/pkg/clouds/aws/login.go +++ b/src/pkg/clouds/aws/login.go @@ -366,7 +366,7 @@ func (a *Aws) CrossDeviceLogin(ctx context.Context) (*awsTokenCache, error) { state := rand.Text()[:16] // random state for CSRF 
protection authURL := auth.GetAuthorizeUrl("aws", "cross", string(a.Region), state, pkce.Challenge) - slog.Info("Please visit the following URL to log in to AWS: (Right click the URL or press ENTER to open browser)") + fmt.Println("Please visit the following URL to log in to AWS: (Right click the URL or press ENTER to open browser)") term.Printf(" %s\n", authURL) term.Print("Enter the authorization code displayed in your browser: ") ctx, inputCh, done := term.OpenBrowserWithInputOnEnter(ctx, authURL) diff --git a/src/pkg/clouds/gcp/login.go b/src/pkg/clouds/gcp/login.go index 55f7f0fe1..ca0afe5e0 100644 --- a/src/pkg/clouds/gcp/login.go +++ b/src/pkg/clouds/gcp/login.go @@ -327,7 +327,7 @@ func (gcp *Gcp) InteractiveLogin(ctx context.Context) (oauth2.TokenSource, error publicKeyBase64 := base64.URLEncoding.EncodeToString(pubKey[:]) authorizeURL := auth.GetAuthorizeUrl("gcp", publicKeyBase64) - slog.Info("Please visit the following URL to log in to Google Cloud Platform: (Right click the URL or press ENTER to open browser)") + fmt.Println("Please visit the following URL to log in to Google Cloud Platform: (Right click the URL or press ENTER to open browser)") term.Printf(" %s\n", authorizeURL) ctx, done := term.OpenBrowserOnEnter(ctx, authorizeURL) diff --git a/src/pkg/debug/debug.go b/src/pkg/debug/debug.go index 1a51032c9..b3bc76038 100644 --- a/src/pkg/debug/debug.go +++ b/src/pkg/debug/debug.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "log/slog" "strings" "time" @@ -192,7 +191,7 @@ func buildDeploymentDebugPrompt(debugConfig DebugConfig) string { if debugConfig.Project != nil { yaml, err := compose.MarshalYAML(debugConfig.Project) if err != nil { - slog.Warn("Failed to marshal compose project to YAML for debug", "err", err) + fmt.Println("Failed to marshal compose project to YAML for debug:", err) } prompt += fmt.Sprintf( "The compose files are at %s. 
The compose file is as follows:\n\n%s", diff --git a/src/pkg/login/agree_tos.go b/src/pkg/login/agree_tos.go index 891f03a6d..cd4997906 100644 --- a/src/pkg/login/agree_tos.go +++ b/src/pkg/login/agree_tos.go @@ -26,7 +26,7 @@ func InteractiveAgreeToS(ctx context.Context, fabric client.FabricClient) error return nil } - slog.Info("Our latest terms of service can be found at https://s.defang.io/tos") + fmt.Println("Our latest terms of service can be found at https://s.defang.io/tos") var agreeToS bool err := survey.AskOne(&survey.Confirm{ From a74bdcb8263ba665eea5aa58d34d0bd47e8d8304 Mon Sep 17 00:00:00 2001 From: jordanstephens Date: Tue, 21 Apr 2026 17:24:59 -0700 Subject: [PATCH 5/7] fix: use slog.*Context when ctx is in scope at slog callsites Co-Authored-By: Claude Sonnet 4.6 --- src/cmd/cli/command/commands.go | 4 +-- src/cmd/cli/command/compose.go | 26 +++++++++--------- src/cmd/cli/command/compose_test.go | 31 +++++++++++++++++----- src/cmd/cli/command/generate.go | 2 +- src/cmd/cli/command/session.go | 2 +- src/cmd/cli/command/stack.go | 4 +-- src/cmd/cli/command/whoami.go | 4 +-- src/pkg/agent/tools/logs.go | 2 +- src/pkg/auth/auth.go | 4 +-- src/pkg/cli/cd.go | 12 ++++----- src/pkg/cli/cert.go | 32 +++++++++++------------ src/pkg/cli/client/byoc/aws/byoc.go | 20 +++++++------- src/pkg/cli/client/byoc/aws/domain.go | 2 +- src/pkg/cli/client/byoc/do/byoc.go | 4 +-- src/pkg/cli/client/byoc/gcp/byoc.go | 2 +- src/pkg/cli/client/byoc/gcp/stream.go | 24 ++++++++--------- src/pkg/cli/compose/context.go | 8 +++--- src/pkg/cli/compose/fixup.go | 14 +++++----- src/pkg/cli/composeDown.go | 2 +- src/pkg/cli/composeUp.go | 2 +- src/pkg/cli/configList.go | 2 +- src/pkg/cli/deploymentsList.go | 4 +-- src/pkg/cli/estimate.go | 4 +-- src/pkg/cli/generate.go | 4 +-- src/pkg/cli/install_cd.go | 2 +- src/pkg/cli/logout.go | 4 +-- src/pkg/cli/new.go | 4 +-- src/pkg/cli/subscribe.go | 4 +-- src/pkg/cli/tail.go | 12 ++++----- src/pkg/cli/tailAndMonitor.go | 8 +++--- 
src/pkg/cli/teardown_cd.go | 4 +-- src/pkg/clouds/aws/codebuild/cfn/setup.go | 8 +++--- src/pkg/clouds/aws/login.go | 18 ++++++------- src/pkg/clouds/gcp/api.go | 4 +-- src/pkg/clouds/gcp/iam.go | 18 ++++++------- src/pkg/clouds/gcp/login.go | 18 ++++++------- src/pkg/clouds/gcp/storage.go | 2 +- src/pkg/dockerhub/dockerhub.go | 2 +- src/pkg/login/agree_tos.go | 2 +- src/pkg/login/login.go | 6 ++--- src/pkg/mcp/mcp_server.go | 2 +- src/pkg/mcp/resources/resources.go | 4 +-- src/pkg/migrate/heroku.go | 4 +-- src/pkg/migrate/migrate.go | 4 +-- src/pkg/session/session.go | 14 +++++----- src/pkg/setup/setup.go | 12 ++++----- src/pkg/stacks/manager.go | 8 +++--- 47 files changed, 198 insertions(+), 181 deletions(-) diff --git a/src/cmd/cli/command/commands.go b/src/cmd/cli/command/commands.go index a4b6bfece..46a36857a 100644 --- a/src/cmd/cli/command/commands.go +++ b/src/cmd/cli/command/commands.go @@ -50,7 +50,7 @@ func Execute(ctx context.Context) error { if err := RootCmd.ExecuteContext(ctx); err != nil { if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) { - slog.Error(fmt.Sprintln("Error:", client.PrettyError(err))) + slog.ErrorContext(ctx, fmt.Sprintln("Error:", client.PrettyError(err))) track.Evt("CLI Error", P("err", err)) } @@ -419,7 +419,7 @@ var RootCmd = &cobra.Command{ version := cmd.Root().Version // HACK to avoid circular dependency with RootCmd slog.Debug(fmt.Sprintln("Fabric:", v.Fabric, "CLI:", version, "CLI-Min:", v.CliMin)) if global.HasTty && isNewer(version, v.CliMin) && !isUpgradeCommand(cmd) { - slog.Warn("Your CLI version is outdated. Please upgrade to the latest version by running:\n\n defang upgrade\n") + slog.WarnContext(ctx, "Your CLI version is outdated. 
Please upgrade to the latest version by running:\n\n defang upgrade\n") global.HideUpdate = true // hide the upgrade hint at the end } } diff --git a/src/cmd/cli/command/compose.go b/src/cmd/cli/command/compose.go index 6993e3d86..8ea317cf1 100644 --- a/src/cmd/cli/command/compose.go +++ b/src/cmd/cli/command/compose.go @@ -128,7 +128,7 @@ func makeComposeUpCmd() *cobra.Command { } } if len(managedServices) > 0 { - slog.Warn(fmt.Sprintf("Defang cannot monitor status of the following managed service(s): %v.\n To check if the managed service is up, check the status of the service which depends on it.", managedServices)) + slog.WarnContext(ctx, fmt.Sprintf("Defang cannot monitor status of the following managed service(s): %v.\n To check if the managed service is up, check the status of the service which depends on it.", managedServices)) } deploy, project, err := cli.ComposeUp(ctx, global.Client, session.Provider, session.Stack, cli.ComposeUpParams{ @@ -152,7 +152,7 @@ func makeComposeUpCmd() *cobra.Command { printPlaygroundPortalServiceURLs(deploy.Services) if detach { - slog.Info("Detached.") + slog.InfoContext(ctx, "Detached.") return nil } @@ -161,7 +161,7 @@ func makeComposeUpCmd() *cobra.Command { if deploy.Etag != "" { tailSource = "deployment ID " + deploy.Etag } - slog.Info(fmt.Sprintln("Tailing logs for", tailSource, "; press Ctrl+C to detach:")) + slog.InfoContext(ctx, fmt.Sprintln("Tailing logs for", tailSource, "; press Ctrl+C to detach:")) tailOptions := newTailOptionsForDeploy(session.Stack.Name, deploy.Etag, since, global.Verbose) serviceStates, err := cli.TailAndMonitor(ctx, project, session.Provider, time.Duration(waitTimeout)*time.Second, tailOptions) @@ -169,7 +169,7 @@ func makeComposeUpCmd() *cobra.Command { deploymentErr := err debugger, err := debug.NewDebugger(ctx, global.FabricAddr, session.Stack) if err != nil { - slog.Warn(fmt.Sprintln("Failed to initialize debugger:", err)) + slog.WarnContext(ctx, fmt.Sprintln("Failed to initialize 
debugger:", err)) return deploymentErr } handleTailAndMonitorErr(ctx, deploymentErr, debugger, debug.DebugConfig{ @@ -198,7 +198,7 @@ func makeComposeUpCmd() *cobra.Command { return err } - slog.Info("Done.") + slog.InfoContext(ctx, "Done.") flushWarnings() return nil }, @@ -284,7 +284,7 @@ func confirmDeploymentToNewLocation() (bool, error) { func promptToCreateStack(ctx context.Context, targetDirectory string, params stacks.Parameters) error { if global.NonInteractive { - slog.Info("Consider creating a stack to manage your deployments.") + slog.InfoContext(ctx, "Consider creating a stack to manage your deployments.") printDefangHint("To create a stack, do:", "stack new --name="+params.Name) return nil } @@ -311,7 +311,7 @@ func handleComposeUpErr(ctx context.Context, debugger *debug.Debugger, project * } if connect.CodeOf(originalErr) == connect.CodeResourceExhausted && strings.Contains(originalErr.Error(), "maximum number of projects") { - slog.Error(fmt.Sprintln("Error:", client.PrettyError(originalErr))) + slog.ErrorContext(ctx, fmt.Sprintln("Error:", client.PrettyError(originalErr))) err := handleTooManyProjectsError(ctx, provider, originalErr) if err != nil { return originalErr @@ -323,7 +323,7 @@ func handleComposeUpErr(ctx context.Context, debugger *debug.Debugger, project * return originalErr } - slog.Error(fmt.Sprintln("Error:", client.PrettyError(originalErr))) + slog.ErrorContext(ctx, fmt.Sprintln("Error:", client.PrettyError(originalErr))) return debugger.DebugDeploymentError(ctx, debug.DebugConfig{ Project: project, }, originalErr) @@ -332,7 +332,7 @@ func handleComposeUpErr(ctx context.Context, debugger *debug.Debugger, project * func handleTooManyProjectsError(ctx context.Context, provider client.Provider, originalErr error) error { projectName, err := provider.RemoteProjectName(ctx) if err != nil { - slog.Warn(fmt.Sprintln("failed to get remote project name:", err)) + slog.WarnContext(ctx, fmt.Sprintln("failed to get remote project name:", err)) 
return originalErr } @@ -344,7 +344,7 @@ func handleTooManyProjectsError(ctx context.Context, provider client.Provider, o _, err = cli.InteractiveComposeDown(ctx, projectName, global.Client, provider) if err != nil { - slog.Warn(fmt.Sprintln("ComposeDown failed:", err)) + slog.WarnContext(ctx, fmt.Sprintln("ComposeDown failed:", err)) printDefangHint("To deactivate a project, do:", "compose down --project-name "+projectName) return originalErr } else { @@ -359,7 +359,7 @@ func handleTailAndMonitorErr(ctx context.Context, err error, debugger *debug.Deb var errDeploymentFailed client.ErrDeploymentFailed if errors.As(err, &errDeploymentFailed) { // Tail got canceled because of deployment failure: prompt to show the debugger - slog.Warn(fmt.Sprintf("%v", errDeploymentFailed)) + slog.WarnContext(ctx, fmt.Sprintf("%v", errDeploymentFailed)) if errDeploymentFailed.Service != "" { debugConfig.FailedServices = []string{errDeploymentFailed.Service} } @@ -521,7 +521,7 @@ func makeComposeConfigCmd() *cobra.Command { CheckAccountInfo: false, }) if err != nil { - slog.Warn(fmt.Sprintln("unable to load stack:", err, "- some information may not be up-to-date")) + slog.WarnContext(ctx, fmt.Sprintln("unable to load stack:", err, "- some information may not be up-to-date")) sessionx = &session.Session{ Loader: configureLoaderForCommand(cmd), Provider: client.NewPlaygroundProvider(global.Client, stacks.DefaultBeta), @@ -531,7 +531,7 @@ func makeComposeConfigCmd() *cobra.Command { _, err = sessionx.Provider.AccountInfo(ctx) if err != nil { - slog.Warn(fmt.Sprintln("unable to connect to cloud provider:", err, "- some information may not be up-to-date")) + slog.WarnContext(ctx, fmt.Sprintln("unable to connect to cloud provider:", err, "- some information may not be up-to-date")) } project, loadErr := sessionx.Loader.LoadProject(ctx) diff --git a/src/cmd/cli/command/compose_test.go b/src/cmd/cli/command/compose_test.go index 0471b79cb..54bf59b85 100644 --- 
a/src/cmd/cli/command/compose_test.go +++ b/src/cmd/cli/command/compose_test.go @@ -27,25 +27,42 @@ func TestInitializeTailCmd(t *testing.T) { func TestPrintPlaygroundPortalServiceURLs(t *testing.T) { defaultTerm := term.DefaultTerm + oldStdout := os.Stdout t.Cleanup(func() { term.DefaultTerm = defaultTerm + os.Stdout = oldStdout }) - var stdout, stderr bytes.Buffer - term.DefaultTerm = term.NewTerm(os.Stdin, &stdout, &stderr) + // Capture slog output via term logger + var termBuf, stderr bytes.Buffer + term.DefaultTerm = term.NewTerm(os.Stdin, &termBuf, &stderr) slog.SetDefault(logs.NewTermLogger(term.DefaultTerm)) + // Capture fmt.Println output via os.Pipe + r, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + os.Stdout = w + global.Stack.Provider = client.ProviderDefang global.FabricAddr = client.DefaultFabricAddr printPlaygroundPortalServiceURLs([]*defangv1.ServiceInfo{ { Service: &defangv1.Service{Name: "service1"}, }}) - const want = ` * Monitor your services' status in the defang portal - * - https://portal.defang.io/service/service1 -` - if got := stdout.String(); got != want { - t.Errorf("got %q, want %q", got, want) + + w.Close() + var stdoutBuf bytes.Buffer + stdoutBuf.ReadFrom(r) + + const wantSlog = " * Monitor your services' status in the defang portal\n" + if got := termBuf.String(); got != wantSlog { + t.Errorf("slog output: got %q, want %q", got, wantSlog) + } + const wantStdout = " - https://portal.defang.io/service/service1\n" + if got := stdoutBuf.String(); got != wantStdout { + t.Errorf("stdout output: got %q, want %q", got, wantStdout) } } diff --git a/src/cmd/cli/command/generate.go b/src/cmd/cli/command/generate.go index 68828ec85..5fe5c74a0 100644 --- a/src/cmd/cli/command/generate.go +++ b/src/cmd/cli/command/generate.go @@ -52,7 +52,7 @@ var generateCmd = &cobra.Command{ } func afterGenerate(ctx context.Context, result setup.SetupResult) { - slog.Info(fmt.Sprintln("Code generated successfully in folder", result.Folder)) + 
slog.InfoContext(ctx, fmt.Sprintln("Code generated successfully in folder", result.Folder)) editor := pkg.Getenv("DEFANG_EDITOR", "code") // TODO: should we use EDITOR env var instead? But won't handle terminal editors like vim cmdd := exec.Command(editor, result.Folder) err := cmdd.Start() diff --git a/src/cmd/cli/command/session.go b/src/cmd/cli/command/session.go index 308a2c64d..d7f58b4e8 100644 --- a/src/cmd/cli/command/session.go +++ b/src/cmd/cli/command/session.go @@ -167,7 +167,7 @@ func handleInvalidComposeFileErr(ctx context.Context, loadErr error) error { return loadErr } - slog.Error(fmt.Sprintln("Cannot load project:", loadErr)) + slog.ErrorContext(ctx, fmt.Sprintln("Cannot load project:", loadErr)) project, err := compose.NewLoader().CreateProjectForDebug() if err != nil { return fmt.Errorf("%w; original error: %w", err, loadErr) diff --git a/src/cmd/cli/command/stack.go b/src/cmd/cli/command/stack.go index 5db887ec1..e665d17cf 100644 --- a/src/cmd/cli/command/stack.go +++ b/src/cmd/cli/command/stack.go @@ -136,7 +136,7 @@ func makeStackListCmd() *cobra.Command { } if len(stacks) == 0 { - slog.Info("No Defang stacks found in the current directory.") + slog.InfoContext(ctx, "No Defang stacks found in the current directory.") return nil } @@ -174,7 +174,7 @@ func makeStackDefaultCmd() *cobra.Command { return err } - slog.Info(fmt.Sprintf("Stack %q is now the default stack for project %q\n", name, projectName)) + slog.InfoContext(ctx, fmt.Sprintf("Stack %q is now the default stack for project %q\n", name, projectName)) return nil }, } diff --git a/src/cmd/cli/command/whoami.go b/src/cmd/cli/command/whoami.go index 071f56d47..f4bd3654b 100644 --- a/src/cmd/cli/command/whoami.go +++ b/src/cmd/cli/command/whoami.go @@ -27,7 +27,7 @@ var whoamiCmd = &cobra.Command{ CheckAccountInfo: false, // because we do it inside cli.Whoami }) if err != nil { - slog.Warn(fmt.Sprintf("Provider account information not available: %v", err)) + slog.WarnContext(ctx, 
fmt.Sprintf("Provider account information not available: %v", err)) } else { provider = session.Provider } @@ -40,7 +40,7 @@ var whoamiCmd = &cobra.Command{ userInfo, err = auth.FetchUserInfo(ctx, token) if err != nil { // Either the auth service is down, or we're using a Fabric JWT: skip workspace information - slog.Warn(fmt.Sprintln("Workspace information unavailable:", err)) + slog.WarnContext(ctx, fmt.Sprintln("Workspace information unavailable:", err)) } } diff --git a/src/pkg/agent/tools/logs.go b/src/pkg/agent/tools/logs.go index fa9da6c9f..022b34ace 100644 --- a/src/pkg/agent/tools/logs.go +++ b/src/pkg/agent/tools/logs.go @@ -86,7 +86,7 @@ func HandleLogsTool(ctx context.Context, loader client.Loader, params LogsParams }) if err != nil { - slog.Error(fmt.Sprintln("Failed to fetch logs", "error", err)) + slog.ErrorContext(ctx, fmt.Sprintln("Failed to fetch logs", "error", err)) return "", fmt.Errorf("failed to fetch logs: %w", err) } diff --git a/src/pkg/auth/auth.go b/src/pkg/auth/auth.go index 864495325..8cbf5364b 100644 --- a/src/pkg/auth/auth.go +++ b/src/pkg/auth/auth.go @@ -73,13 +73,13 @@ func StartAuthCodeFlow(ctx context.Context, mcpFlow LoginFlow, saveToken func(st ctx := context.Background() code, err := pollForAuthCode(ctx, ar.state) if err != nil { - slog.Error(fmt.Sprintf("failed to poll for auth code: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("failed to poll for auth code: %v", err)) return } token, err := ExchangeCodeForToken(ctx, AuthCodeFlow{code: code, redirectUri: redirectUri, verifier: ar.verifier}) if err != nil { - slog.Error(fmt.Sprintf("failed to exchange code for token: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("failed to exchange code for token: %v", err)) return } diff --git a/src/pkg/cli/cd.go b/src/pkg/cli/cd.go index daaf6ca98..8335fe77b 100644 --- a/src/pkg/cli/cd.go +++ b/src/pkg/cli/cd.go @@ -22,9 +22,9 @@ import ( func CdCommand(ctx context.Context, projectName string, provider client.Provider, fabric 
client.FabricClient, command client.CdCommand) (types.ETag, error) { if projectName == "" { // projectName is empty for "list --remote" - slog.Info(fmt.Sprintf("Running CD command %q", command)) + slog.InfoContext(ctx, fmt.Sprintf("Running CD command %q", command)) } else { - slog.Info(fmt.Sprintf("Running CD command %q in project %q", command, projectName)) + slog.InfoContext(ctx, fmt.Sprintf("Running CD command %q in project %q", command, projectName)) } if dryrun.DoDryRun { return "", dryrun.ErrDryRun @@ -49,7 +49,7 @@ func CdCommand(ctx context.Context, projectName string, provider client.Provider case client.CdCommandDown, client.CdCommandDestroy: err := deleteSubdomain(ctx, projectName, provider, fabric) if err != nil { - slog.Warn("Unable to update deployment history; deployment will proceed anyway.") + slog.WarnContext(ctx, "Unable to update deployment history; deployment will proceed anyway.") break } // Update deployment table to mark deployment as destroyed only after successful deletion of the subdomain @@ -67,7 +67,7 @@ func CdCommand(ctx context.Context, projectName string, provider client.Provider }) if err != nil { slog.Debug(fmt.Sprintf("Failed to record deployment: %v", err)) - slog.Warn("Unable to update deployment history; deployment will proceed anyway.") + slog.WarnContext(ctx, "Unable to update deployment history; deployment will proceed anyway.") } } return cd.ETag, nil @@ -83,7 +83,7 @@ func deleteSubdomain(ctx context.Context, projectName string, provider client.Pr // This can fail when the project was deployed from a different workspace than the current one slog.Debug(fmt.Sprintln("DeleteSubdomainZone failed:", err)) if connect.CodeOf(err) == connect.CodeNotFound { - slog.Warn("Subdomain not found; did you mean to destroy a different project or stack?") + slog.WarnContext(ctx, "Subdomain not found; did you mean to destroy a different project or stack?") } return err } @@ -157,7 +157,7 @@ func CdListFromStorage(ctx context.Context, 
provider client.Provider, allRegions if allRegions { accountInfo.Region = "" } - slog.Info(fmt.Sprintf("No projects found in %v", accountInfo)) + slog.InfoContext(ctx, fmt.Sprintf("No projects found in %v", accountInfo)) } return term.Table(stacks, "Project", "Stack", "Workspace", "CdRegion") diff --git a/src/pkg/cli/cert.go b/src/pkg/cli/cert.go index f4e2c5ca5..65a79988f 100644 --- a/src/pkg/cli/cert.go +++ b/src/pkg/cli/cert.go @@ -96,7 +96,7 @@ func GenerateLetsEncryptCert(ctx context.Context, project *compose.Project, clie } if service, ok := project.Services[serviceInfo.Service.Name]; ok { if service.DomainName != serviceInfo.Domainname { - slog.Warn(fmt.Sprintf("service %q: domainname %q in compose file does not match deployed value %q", service.Name, service.DomainName, serviceInfo.Domainname)) + slog.WarnContext(ctx, fmt.Sprintf("service %q: domainname %q in compose file does not match deployed value %q", service.Name, service.DomainName, serviceInfo.Domainname)) } cnt++ targets := getDomainTargets(serviceInfo, service) @@ -111,7 +111,7 @@ func GenerateLetsEncryptCert(ctx context.Context, project *compose.Project, clie } } if cnt == 0 { - slog.Info("No `domainname` found in compose file; no HTTPS cert generation needed") + slog.InfoContext(ctx, "No `domainname` found in compose file; no HTTPS cert generation needed") } return nil @@ -133,35 +133,35 @@ func getDomainTargets(serviceInfo *defangv1.ServiceInfo, service compose.Service } func generateCert(ctx context.Context, domain string, targets []string, client client.FabricClient) { - slog.Info(fmt.Sprintf("Checking DNS setup for %v", domain)) + slog.InfoContext(ctx, fmt.Sprintf("Checking DNS setup for %v", domain)) if err := waitForCNAME(ctx, domain, targets, client); err != nil { - slog.Error(fmt.Sprintf("Error waiting for CNAME: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error waiting for CNAME: %v", err)) return } - slog.Info(fmt.Sprintf("%v DNS is properly configured!", domain)) + 
slog.InfoContext(ctx, fmt.Sprintf("%v DNS is properly configured!", domain)) if err := cert.CheckTLSCert(ctx, domain); err == nil { - slog.Info(fmt.Sprintf("TLS cert for %v is already ready", domain)) + slog.InfoContext(ctx, fmt.Sprintf("TLS cert for %v is already ready", domain)) return } if err := pkg.SleepWithContext(ctx, 5*time.Second); err != nil { // slight delay to ensure DNS to propagate - slog.Error(fmt.Sprintf("Error waiting for DNS propagation: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error waiting for DNS propagation: %v", err)) return } - slog.Info(fmt.Sprintf("Triggering cert generation for %v", domain)) + slog.InfoContext(ctx, fmt.Sprintf("Triggering cert generation for %v", domain)) if err := triggerCertGeneration(ctx, domain); err != nil { - slog.Error("Error triggering cert generation, please try again", "domain", domain, "err", err) + slog.ErrorContext(ctx, "Error triggering cert generation, please try again", "domain", domain, "err", err) return } - slog.Info(fmt.Sprintf("Waiting for TLS cert to be online for %v, this could take a few minutes", domain)) + slog.InfoContext(ctx, fmt.Sprintf("Waiting for TLS cert to be online for %v, this could take a few minutes", domain)) if err := waitForTLS(ctx, domain); err != nil { - slog.Error(fmt.Sprintf("Error waiting for TLS to be online: %v", err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error waiting for TLS to be online: %v", err)) // FIXME: Add more info on how to debug, possibly provided by the server side to avoid client type detection here return } - slog.Info(fmt.Sprintf("TLS cert for %v is ready\n", domain)) + slog.InfoContext(ctx, fmt.Sprintf("TLS cert for %v is ready\n", domain)) } func triggerCertGeneration(ctx context.Context, domain string) error { @@ -246,13 +246,13 @@ func waitForCNAME(ctx context.Context, domain string, targets []string, client c } } if serverVerifyRpcFailure >= 3 { - slog.Warn(fmt.Sprintf("Server side DNS verification for %v failed multiple times, skipping server 
side DNS verification.", domain)) + slog.WarnContext(ctx, fmt.Sprintf("Server side DNS verification for %v failed multiple times, skipping server side DNS verification.", domain)) } } if serverSideVerified || serverVerifyRpcFailure >= 3 { locallyVerified := dns.CheckDomainDNSReady(ctx, domain, targets) if serverSideVerified && !locallyVerified { - slog.Warn(fmt.Sprintf("DNS settings for %v are verified, but changes may take a few minutes to propagate due to caching.", domain)) + slog.WarnContext(ctx, fmt.Sprintf("DNS settings for %v are verified, but changes may take a few minutes to propagate due to caching.", domain)) return nil } if locallyVerified { @@ -265,9 +265,9 @@ func waitForCNAME(ctx context.Context, domain string, targets []string, client c if err := verifyDNS(); err == nil { return nil } - slog.Info(fmt.Sprintf("Configure a CNAME or ALIAS record for the domain name: %v", domain)) + slog.InfoContext(ctx, fmt.Sprintf("Configure a CNAME or ALIAS record for the domain name: %v", domain)) term.Printf(" %v -> %v\n", domain, strings.Join(targets, " or ")) - slog.Info("Awaiting DNS record setup and propagation... This may take a while.") + slog.InfoContext(ctx, "Awaiting DNS record setup and propagation... 
This may take a while.") for { select { diff --git a/src/pkg/cli/client/byoc/aws/byoc.go b/src/pkg/cli/client/byoc/aws/byoc.go index d4d148718..0ce54d996 100644 --- a/src/pkg/cli/client/byoc/aws/byoc.go +++ b/src/pkg/cli/client/byoc/aws/byoc.go @@ -121,11 +121,11 @@ func NewByocProvider(ctx context.Context, tenantName types.TenantLabel, stack st AWSSecretAccessKey := os.Getenv("AWS_SECRET_ACCESS_KEY") switch { case AWSAccessKeyID != "" && AWSSecretAccessKey != "": - slog.Warn(fmt.Sprintf("Both AWS_ACCESS_KEY_ID/AWS_SECRET_ACCESS_KEY and AWS_PROFILE (%q) are set; access keys take precedence and AWS_PROFILE will be ignored", awsProfileName)) + slog.WarnContext(ctx, fmt.Sprintf("Both AWS_ACCESS_KEY_ID/AWS_SECRET_ACCESS_KEY and AWS_PROFILE (%q) are set; access keys take precedence and AWS_PROFILE will be ignored", awsProfileName)) case AWSAccessKeyID != "" && AWSSecretAccessKey == "": - slog.Warn(fmt.Sprintf("Partial credentials found in env, missing: AWS_SECRET_ACCESS_KEY; using AWS_PROFILE (%q) instead", awsProfileName)) + slog.WarnContext(ctx, fmt.Sprintf("Partial credentials found in env, missing: AWS_SECRET_ACCESS_KEY; using AWS_PROFILE (%q) instead", awsProfileName)) case AWSAccessKeyID == "" && AWSSecretAccessKey != "": - slog.Warn(fmt.Sprintf("Partial credentials found in env, missing: AWS_ACCESS_KEY_ID; using AWS_PROFILE (%q) instead", awsProfileName)) + slog.WarnContext(ctx, fmt.Sprintf("Partial credentials found in env, missing: AWS_ACCESS_KEY_ID; using AWS_PROFILE (%q) instead", awsProfileName)) } } @@ -271,7 +271,7 @@ func (b *ByocAws) deploy(ctx context.Context, req *client.DeployRequest, cmd str dockerHubUser, dockerHubPass, err := dockerhub.GetDockerHubCredentials(ctx) if err != nil { slog.Debug(fmt.Sprintf("Could not retrieve Docker Hub credentials: %v", err)) - slog.Warn("Docker Hub credentials are required to avoid pull throttling. Please run `docker login` or set the DOCKERHUB_USERNAME and DOCKERHUB_TOKEN environment variables. 
Without valid credentials, image pulls may be rate-limited or fail.") + slog.WarnContext(ctx, "Docker Hub credentials are required to avoid pull throttling. Please run `docker login` or set the DOCKERHUB_USERNAME and DOCKERHUB_TOKEN environment variables. Without valid credentials, image pulls may be rate-limited or fail.") } else { slog.Debug(fmt.Sprintf("Using Docker Hub credentials with user %v", dockerHubUser)) cdCmd.dockerHubUsername = dockerHubUser @@ -289,7 +289,7 @@ func (b *ByocAws) deploy(ctx context.Context, req *client.DeployRequest, cmd str for _, si := range serviceInfos { if si.UseAcmeCert { - slog.Info(fmt.Sprintf("To activate TLS certificate for %v, run 'defang cert gen'", si.Domainname)) + slog.InfoContext(ctx, fmt.Sprintf("To activate TLS certificate for %v, run 'defang cert gen'", si.Domainname)) } } @@ -412,7 +412,7 @@ func (b *ByocAws) findZone(ctx context.Context, domain, roleARN string) (string, return "", err } if len(zones) > 1 { - slog.Warn(fmt.Sprintf("Multiple hosted zones found for domain %q, using the first one: %v", domain, zones[0].Id)) + slog.WarnContext(ctx, fmt.Sprintf("Multiple hosted zones found for domain %q, using the first one: %v", domain, zones[0].Id)) } return *zones[0].Id, nil } @@ -552,7 +552,7 @@ func (b *ByocAws) runCdCommand(ctx context.Context, cmd cdCommand) (awscodebuild if cmd.dockerHubUsername != "" && cmd.dockerHubAccessToken != "" { arn, err := b.putDockerHubSecret(ctx, cmd.project, cmd.dockerHubUsername, cmd.dockerHubAccessToken) if err != nil { - slog.Warn(fmt.Sprintf("Could not store Docker Hub credentials in Secrets Manager, images from dockerhub may be throttled during build: %v", err)) + slog.WarnContext(ctx, fmt.Sprintf("Could not store Docker Hub credentials in Secrets Manager, images from dockerhub may be throttled during build: %v", err)) } else { env["CI_REGISTRY_CREDENTIALS_ARN"] = arn slog.Debug("Stored Docker Hub credentials in Secrets Manager: " + arn) @@ -697,7 +697,7 @@ func (b *ByocAws) 
QueryLogs(ctx context.Context, req *defangv1.TailRequest) (ite // if the cloud formation stack has been destroyed, we can still query // logs for builds and services if err := b.driver.FillOutputs(ctx); err != nil { - slog.Warn(fmt.Sprintf("Unable to show CD logs: %v", err)) // TODO: could skip this warning if the user wasn't asking for CD logs + slog.WarnContext(ctx, fmt.Sprintf("Unable to show CD logs: %v", err)) // TODO: could skip this warning if the user wasn't asking for CD logs } cfg, err := b.driver.LoadConfig(ctx) @@ -903,7 +903,7 @@ func (b *ByocAws) UpdateServiceInfo(ctx context.Context, si *defangv1.ServiceInf } func (b *ByocAws) TearDownCD(ctx context.Context) error { - slog.Warn("Deleting the Defang CD cluster; currently existing stacks or configs will not be deleted, but they will be orphaned and they will need to be cleaned up manually") + slog.WarnContext(ctx, "Deleting the Defang CD cluster; currently existing stacks or configs will not be deleted, but they will be orphaned and they will need to be cleaned up manually") return b.driver.TearDown(ctx) } @@ -963,7 +963,7 @@ func (b *ByocAws) CdList(ctx context.Context, allRegions bool) (iter.Seq[state.I func (b *ByocAws) Subscribe(ctx context.Context, req *defangv1.SubscribeRequest) (iter.Seq2[*defangv1.SubscribeResponse, error], error) { if err := b.driver.FillOutputs(ctx); err != nil { - slog.Warn(fmt.Sprintf("Unable to get log group ARNs: %v", err)) + slog.WarnContext(ctx, fmt.Sprintf("Unable to get log group ARNs: %v", err)) } cfg, err := b.driver.LoadConfig(ctx) diff --git a/src/pkg/cli/client/byoc/aws/domain.go b/src/pkg/cli/client/byoc/aws/domain.go index c8cd1cda4..bc7206b04 100644 --- a/src/pkg/cli/client/byoc/aws/domain.go +++ b/src/pkg/cli/client/byoc/aws/domain.go @@ -43,7 +43,7 @@ func prepareDomainDelegation(ctx context.Context, projectDomain, projectName, st // but this is acceptable because the next time the zone is deployed, we'll get the existing delegation set from the zone. 
delegationSet, err = findUsableDelegationSet(ctx, projectDomain, r53Client, resolverAt) if err != nil { - slog.Warn(fmt.Sprintf("Failed to find existing usable delegation set: %v, creating a new one", err)) + slog.WarnContext(ctx, fmt.Sprintf("Failed to find existing usable delegation set: %v, creating a new one", err)) } if delegationSet != nil { slog.Debug(fmt.Sprintln("Reusing existing usable Route53 delegation set:", *delegationSet.Id)) diff --git a/src/pkg/cli/client/byoc/do/byoc.go b/src/pkg/cli/client/byoc/do/byoc.go index feb844713..1d9857762 100644 --- a/src/pkg/cli/client/byoc/do/byoc.go +++ b/src/pkg/cli/client/byoc/do/byoc.go @@ -447,7 +447,7 @@ func (b *ByocDo) QueryLogs(ctx context.Context, req *defangv1.TailRequest) (iter return nil, errors.New("no deployments found") } - slog.Info("Waiting for CD command to finish gathering logs") + slog.InfoContext(ctx, "Waiting for CD command to finish gathering logs") for { deploymentInfo, _, err := b.client.Apps.GetDeployment(ctx, appID, deploymentID) if err != nil { @@ -497,7 +497,7 @@ func (b *ByocDo) QueryLogs(ctx context.Context, req *defangv1.TailRequest) (iter } func (b *ByocDo) TearDownCD(ctx context.Context) error { - slog.Warn("Deleting the Defang CD app; currently existing stacks or configs will not be deleted, but they will be orphaned and they will need to be cleaned up manually") + slog.WarnContext(ctx, "Deleting the Defang CD app; currently existing stacks or configs will not be deleted, but they will be orphaned and they will need to be cleaned up manually") app, err := b.getAppByName(ctx, appPlatform.CdName) if err != nil { return err diff --git a/src/pkg/cli/client/byoc/gcp/byoc.go b/src/pkg/cli/client/byoc/gcp/byoc.go index 4161e2cd0..bee3c4937 100644 --- a/src/pkg/cli/client/byoc/gcp/byoc.go +++ b/src/pkg/cli/client/byoc/gcp/byoc.go @@ -175,7 +175,7 @@ func (b *ByocGcp) SetUpCD(ctx context.Context, force bool) error { } // TODO: Handle project creation flow - slog.Info(fmt.Sprintf("Setting up 
defang CD in GCP project %s, this could take a few minutes", b.driver.GetProjectID())) + slog.InfoContext(ctx, fmt.Sprintf("Setting up defang CD in GCP project %s, this could take a few minutes", b.driver.GetProjectID())) // 1. Enable required APIs // TODO: enable minimum APIs needed for bootstrap the cd image, let CD enable the rest of the APIs apis := []string{ diff --git a/src/pkg/cli/client/byoc/gcp/stream.go b/src/pkg/cli/client/byoc/gcp/stream.go index bcc33840a..4407c336c 100644 --- a/src/pkg/cli/client/byoc/gcp/stream.go +++ b/src/pkg/cli/client/byoc/gcp/stream.go @@ -486,13 +486,13 @@ func getActivityParser(ctx context.Context, gcpLogsClient GcpLogsClient, waitFor } if entry.GetProtoPayload().GetTypeUrl() != "type.googleapis.com/google.cloud.audit.AuditLog" { - slog.Warn(fmt.Sprintf("unexpected log entry type : %v", entry.GetProtoPayload().GetTypeUrl())) + slog.WarnContext(ctx, fmt.Sprintf("unexpected log entry type : %v", entry.GetProtoPayload().GetTypeUrl())) return nil, nil } auditLog := new(auditpb.AuditLog) if err := entry.GetProtoPayload().UnmarshalTo(auditLog); err != nil { - slog.Warn(fmt.Sprintf("failed to unmarshal audit log : %v", err)) + slog.WarnContext(ctx, fmt.Sprintf("failed to unmarshal audit log : %v", err)) return nil, nil } @@ -528,7 +528,7 @@ func getActivityParser(ctx context.Context, gcpLogsClient GcpLogsClient, waitFor Status: status.GetMessage(), }}, nil } else { - slog.Warn(fmt.Sprintf("missing request and response in audit log for service %v", path.Base(auditLog.GetResourceName()))) + slog.WarnContext(ctx, fmt.Sprintf("missing request and response in audit log for service %v", path.Base(auditLog.GetResourceName()))) return nil, nil } @@ -551,7 +551,7 @@ func getActivityParser(ctx context.Context, gcpLogsClient GcpLogsClient, waitFor serviceName := GetValueInStruct(response, "spec.template.metadata.labels.defang-service") status := auditLog.GetStatus() if status == nil { - slog.Warn(fmt.Sprintf("missing status in audit log for job 
%v", path.Base(auditLog.GetResourceName()))) + slog.WarnContext(ctx, fmt.Sprintf("missing status in audit log for job %v", path.Base(auditLog.GetResourceName()))) return nil, nil } var state defangv1.ServiceState @@ -579,7 +579,7 @@ func getActivityParser(ctx context.Context, gcpLogsClient GcpLogsClient, waitFor // Report all ready services when CD is successful, prevents cli deploy stop before cd is done return getReadyServicesCompletedResps(auditLog.GetStatus().GetMessage()), nil // Ignore success cd status when we are waiting for service status } else { - slog.Warn(fmt.Sprintf("unexpected execution name in audit log : %v", executionName)) + slog.WarnContext(ctx, fmt.Sprintf("unexpected execution name in audit log : %v", executionName)) return nil, nil } case "gce_instance_group_manager": // Compute engine update start @@ -591,24 +591,24 @@ func getActivityParser(ctx context.Context, gcpLogsClient GcpLogsClient, waitFor managerName := entry.Resource.Labels["instance_group_manager_name"] labels, err := gcpLogsClient.GetInstanceGroupManagerLabels(ctx, project, region, managerName) if err != nil { - slog.Warn(fmt.Sprintf("failed to get instance group manager labels for %v: %v", managerName, err)) + slog.WarnContext(ctx, fmt.Sprintf("failed to get instance group manager labels for %v: %v", managerName, err)) return nil, nil } serviceName := labels["defang-service"] if serviceName == "" { - slog.Warn(fmt.Sprintf("missing defang-service label in instance group manager %v", managerName)) + slog.WarnContext(ctx, fmt.Sprintf("missing defang-service label in instance group manager %v", managerName)) return nil, nil } if etag != "" { labelEtag := labels["defang-etag"] if labelEtag != etag { - slog.Warn(fmt.Sprintf("skipping instance group manager %v: etag mismatch (got %q, want %q)", managerName, labelEtag, etag)) + slog.WarnContext(ctx, fmt.Sprintf("skipping instance group manager %v: etag mismatch (got %q, want %q)", managerName, labelEtag, etag)) return nil, nil } } 
rootTriggerId := entry.GetLabels()["compute.googleapis.com/root_trigger_id"] if rootTriggerId == "" { - slog.Warn(fmt.Sprintf("missing root_trigger_id in audit log for instance group manager %v", path.Base(auditLog.GetResourceName()))) + slog.WarnContext(ctx, fmt.Sprintf("missing root_trigger_id in audit log for instance group manager %v", path.Base(auditLog.GetResourceName()))) } else { computeEngineRootTriggers[rootTriggerId] = serviceName } @@ -627,7 +627,7 @@ func getActivityParser(ctx context.Context, gcpLogsClient GcpLogsClient, waitFor } response := auditLog.GetResponse() if response == nil { - slog.Warn(fmt.Sprintf("missing response in audit log for instance group %v", path.Base(auditLog.GetResourceName()))) + slog.WarnContext(ctx, fmt.Sprintf("missing response in audit log for instance group %v", path.Base(auditLog.GetResourceName()))) return nil, nil } status := response.GetFields()["status"].GetStringValue() @@ -653,7 +653,7 @@ func getActivityParser(ctx context.Context, gcpLogsClient GcpLogsClient, waitFor } bt, err := gcpLogsClient.GetBuildInfo(ctx, buildId) // TODO: Cache the build IDs? 
if err != nil { - slog.Warn(fmt.Sprintf("failed to get build tag for build %v: %v", buildId, err)) + slog.WarnContext(ctx, fmt.Sprintf("failed to get build tag for build %v: %v", buildId, err)) return nil, nil } @@ -707,7 +707,7 @@ func getActivityParser(ctx context.Context, gcpLogsClient GcpLogsClient, waitFor }}, nil } default: - slog.Warn(fmt.Sprintf("unexpected resource type : %v", entry.Resource.Type)) + slog.WarnContext(ctx, fmt.Sprintf("unexpected resource type : %v", entry.Resource.Type)) return nil, nil } } diff --git a/src/pkg/cli/compose/context.go b/src/pkg/cli/compose/context.go index 86cd26b91..21726f604 100644 --- a/src/pkg/cli/compose/context.go +++ b/src/pkg/cli/compose/context.go @@ -220,7 +220,7 @@ func getRemoteBuildContext(ctx context.Context, provider client.Provider, projec return fmt.Sprintf("s3://cd-preview/%s%s", service, archiveType.Extension), nil } - slog.Info(fmt.Sprintln("Packaging the project files for", service, "at", root)) + slog.InfoContext(ctx, fmt.Sprintln("Packaging the project files for", service, "at", root)) buffer, err := createArchive(ctx, build.Context, build.Dockerfile, archiveType) if err != nil { return "", err @@ -242,7 +242,7 @@ func getRemoteBuildContext(ctx context.Context, provider client.Provider, projec panic("unexpected UploadMode value") } - slog.Info(fmt.Sprintln("Uploading the project files for", service)) + slog.InfoContext(ctx, fmt.Sprintln("Uploading the project files for", service)) return uploadArchive(ctx, provider, projectName, buffer, archiveType, digest) } @@ -475,7 +475,7 @@ func createArchive(ctx context.Context, root string, dockerfile string, contentT fileCount++ if fileCount == ContextFileLimit+1 { - slog.Warn(fmt.Sprintf("the build context contains more than %d files; use --debug or create .dockerignore to exclude caches and build artifacts", ContextFileLimit)) + slog.WarnContext(ctx, fmt.Sprintf("the build context contains more than %d files; use --debug or create .dockerignore to exclude 
caches and build artifacts", ContextFileLimit)) } bufLen := buf.Len() @@ -484,7 +484,7 @@ func createArchive(ctx context.Context, root string, dockerfile string, contentT return fmt.Errorf("the build context is limited to %s; consider downloading large files in the Dockerfile or set the DEFANG_BUILD_CONTEXT_LIMIT environment variable", units.BytesSize(float64(ContextSizeHardLimit))) } if bufLen <= ContextSizeSoftLimit && buf.Len() > ContextSizeSoftLimit { - slog.Warn(fmt.Sprintf("the build context is larger than %s; use --debug or create .dockerignore to exclude caches and build artifacts", units.BytesSize(float64(buf.Len())))) + slog.WarnContext(ctx, fmt.Sprintf("the build context is larger than %s; use --debug or create .dockerignore to exclude caches and build artifacts", units.BytesSize(float64(buf.Len())))) } return err }) diff --git a/src/pkg/cli/compose/fixup.go b/src/pkg/cli/compose/fixup.go index 67a6f8b43..a8235e0e5 100644 --- a/src/pkg/cli/compose/fixup.go +++ b/src/pkg/cli/compose/fixup.go @@ -83,7 +83,7 @@ func FixupServices(ctx context.Context, provider client.Provider, project *compo // Ignore "build" config if we have "image", unless in --build or --force mode if svccfg.Image != "" && svccfg.Build != nil && upload != UploadModeDigest && upload != UploadModeForce { - slog.Warn(fmt.Sprintf("service %q: using published image instead of rebuilding; pass --build to build and publish a new image", svccfg.Name)) + slog.WarnContext(ctx, fmt.Sprintf("service %q: using published image instead of rebuilding; pass --build to build and publish a new image", svccfg.Name)) svccfg.Build = nil } @@ -155,14 +155,14 @@ func FixupServices(ctx context.Context, provider client.Provider, project *compo } if len(removedArgs) > 0 { - slog.Warn(fmt.Sprintf("service %q: skipping unset build argument %q", svccfg.Name, removedArgs)) + slog.WarnContext(ctx, fmt.Sprintf("service %q: skipping unset build argument %q", svccfg.Name, removedArgs)) } } // Fixup secret references; 
secrets are supposed to be files, not env, but it's kept for backward compatibility for i, secret := range svccfg.Secrets { if i == 0 { // only warn once - slog.Warn(fmt.Sprintf("service %q: secrets will be exposed as environment variables, not files (use 'environment' instead)", svccfg.Name)) + slog.WarnContext(ctx, fmt.Sprintf("service %q: secrets will be exposed as environment variables, not files (use 'environment' instead)", svccfg.Name)) } svccfg.Environment[secret.Source] = nil } @@ -176,7 +176,7 @@ func FixupServices(ctx context.Context, provider client.Provider, project *compo // A bug in Compose-go env file parsing can cause empty keys if key == "" { if !shownOnce { - slog.Warn(fmt.Sprintf("service %q: skipping unset environment variable key", svccfg.Name)) + slog.WarnContext(ctx, fmt.Sprintf("service %q: skipping unset environment variable key", svccfg.Name)) shownOnce = true } delete(svccfg.Environment, key) // remove the empty key; this is safe @@ -204,17 +204,17 @@ func FixupServices(ctx context.Context, provider client.Provider, project *compo } if len(notAdjusted) > 0 { - slog.Warn(fmt.Sprintf("service %q: environment variable(s) %q will use the `defang config` value instead of adjusted service name", svccfg.Name, notAdjusted)) + slog.WarnContext(ctx, fmt.Sprintf("service %q: environment variable(s) %q will use the `defang config` value instead of adjusted service name", svccfg.Name, notAdjusted)) } if len(overridden) > 0 { - slog.Warn(fmt.Sprintf("service %q: environment variable(s) %q overridden by config", svccfg.Name, overridden)) + slog.WarnContext(ctx, fmt.Sprintf("service %q: environment variable(s) %q overridden by config", svccfg.Name, overridden)) } _, scaling := svccfg.Extensions["x-defang-autoscaling"] if scaling { if _, ok := provider.(*client.PlaygroundProvider); ok { - slog.Warn(fmt.Sprintf("service %q: auto-scaling is not supported in the Playground; consider using BYOC (https://s.defang.io/byoc)", svccfg.Name)) + 
slog.WarnContext(ctx, fmt.Sprintf("service %q: auto-scaling is not supported in the Playground; consider using BYOC (https://s.defang.io/byoc)", svccfg.Name)) } } diff --git a/src/pkg/cli/composeDown.go b/src/pkg/cli/composeDown.go index e4ab2ffbd..44eae2bcf 100644 --- a/src/pkg/cli/composeDown.go +++ b/src/pkg/cli/composeDown.go @@ -35,6 +35,6 @@ func InteractiveComposeDown(ctx context.Context, projectName string, fabric clie return "", ErrDoNotComposeDown } - slog.Info("Deactivating project " + projectName) + slog.InfoContext(ctx, "Deactivating project "+projectName) return ComposeDown(ctx, projectName, fabric, provider) } diff --git a/src/pkg/cli/composeUp.go b/src/pkg/cli/composeUp.go index 400c0b8a6..a34ad6bae 100644 --- a/src/pkg/cli/composeUp.go +++ b/src/pkg/cli/composeUp.go @@ -210,7 +210,7 @@ func ComposeUp(ctx context.Context, fabric client.FabricClient, provider client. }) if err != nil { slog.Debug(fmt.Sprintln("Failed to record deployment:", err)) - slog.Warn("Unable to update deployment history; deployment will proceed anyway.") + slog.WarnContext(ctx, "Unable to update deployment history; deployment will proceed anyway.") } if term.DoDebug() { diff --git a/src/pkg/cli/configList.go b/src/pkg/cli/configList.go index e3bffc248..d94a9830b 100644 --- a/src/pkg/cli/configList.go +++ b/src/pkg/cli/configList.go @@ -24,7 +24,7 @@ func ConfigList(ctx context.Context, projectName string, provider client.Provide numConfigs := len(config.Names) if numConfigs == 0 { - slog.Warn("No configs found") + slog.WarnContext(ctx, "No configs found") return nil } diff --git a/src/pkg/cli/deploymentsList.go b/src/pkg/cli/deploymentsList.go index c5cb23372..071091d0e 100644 --- a/src/pkg/cli/deploymentsList.go +++ b/src/pkg/cli/deploymentsList.go @@ -49,9 +49,9 @@ func DeploymentsList(ctx context.Context, client client.FabricClient, params Lis active = " active" } if params.ProjectName == "" { - slog.Warn(fmt.Sprintf("No%s deployments found; use --workspace to specify a 
different workspace", active)) + slog.WarnContext(ctx, fmt.Sprintf("No%s deployments found; use --workspace to specify a different workspace", active)) } else { - slog.Warn(fmt.Sprintf("No%s deployments found for project %q", active, params.ProjectName)) + slog.WarnContext(ctx, fmt.Sprintf("No%s deployments found for project %q", active, params.ProjectName)) } return nil } diff --git a/src/pkg/cli/estimate.go b/src/pkg/cli/estimate.go index 3f66f14b1..436a38956 100644 --- a/src/pkg/cli/estimate.go +++ b/src/pkg/cli/estimate.go @@ -28,7 +28,7 @@ func RunEstimate(ctx context.Context, project *compose.Project, client client.Fa return nil, err } - slog.Info("Preparing estimate") + slog.InfoContext(ctx, "Preparing estimate") estimate, err := client.Estimate(ctx, &defangv1.EstimateRequest{ Provider: estimateProviderID.Value(), @@ -68,7 +68,7 @@ func GeneratePreview(ctx context.Context, project *compose.Project, client clien return "", err } - slog.Info("Generating deployment preview, this may take a few minutes...") + slog.InfoContext(ctx, "Generating deployment preview, this may take a few minutes...") var pulumiPreviewLogLines []string tailOptions := TailOptions{ Deployment: resp.Etag, diff --git a/src/pkg/cli/generate.go b/src/pkg/cli/generate.go index 34a9f4c45..e1cc80d77 100644 --- a/src/pkg/cli/generate.go +++ b/src/pkg/cli/generate.go @@ -24,7 +24,7 @@ type GenerateArgs struct { func GenerateWithAI(ctx context.Context, client client.FabricClient, args GenerateArgs) ([]string, error) { if dryrun.DoDryRun { - slog.Warn("Dry run, no project files will be generated") + slog.WarnContext(ctx, "Dry run, no project files will be generated") return nil, dryrun.ErrDryRun } @@ -50,7 +50,7 @@ func GenerateWithAI(ctx context.Context, client client.FabricClient, args Genera } // Write each file to disk - slog.Info("Writing files to disk...") + slog.InfoContext(ctx, "Writing files to disk...") if err := os.MkdirAll(args.Folder, 0755); err != nil { return nil, err } diff --git 
a/src/pkg/cli/install_cd.go b/src/pkg/cli/install_cd.go index 023cc7d85..e4194f321 100644 --- a/src/pkg/cli/install_cd.go +++ b/src/pkg/cli/install_cd.go @@ -13,6 +13,6 @@ func InstallCD(ctx context.Context, provider client.Provider, force bool) error if dryrun.DoDryRun { return errors.New("dry run") } - slog.Info("Installing the CD resources into the cluster") + slog.InfoContext(ctx, "Installing the CD resources into the cluster") return provider.SetUpCD(ctx, force) } diff --git a/src/pkg/cli/logout.go b/src/pkg/cli/logout.go index 3963c51d7..c8d09eee0 100644 --- a/src/pkg/cli/logout.go +++ b/src/pkg/cli/logout.go @@ -19,7 +19,7 @@ func Logout(ctx context.Context, fabricClient client.FabricClient, fabricAddr st } if err := client.TokenStore.Delete(client.TokenStorageName(fabricAddr)); err != nil { - slog.Warn(fmt.Sprintln("Failed to remove stored token:", err)) + slog.WarnContext(ctx, fmt.Sprintln("Failed to remove stored token:", err)) // Don't return the error - we still consider logout successful } @@ -27,7 +27,7 @@ func Logout(ctx context.Context, fabricClient client.FabricClient, fabricAddr st jwtFile, err := client.GetWebIdentityTokenFile(fabricAddr) if err == nil { if err := os.Remove(jwtFile); err != nil && !os.IsNotExist(err) { - slog.Warn(fmt.Sprintln("Failed to remove JWT token file:", err)) + slog.WarnContext(ctx, fmt.Sprintln("Failed to remove JWT token file:", err)) } else if err == nil { slog.Debug(fmt.Sprintln("Removed JWT token file:", jwtFile)) } diff --git a/src/pkg/cli/new.go b/src/pkg/cli/new.go index a55f77ee3..e2492e66b 100644 --- a/src/pkg/cli/new.go +++ b/src/pkg/cli/new.go @@ -76,7 +76,7 @@ func copyFromSamples(ctx context.Context, dir string, names []string, skipExisti } defer tarball.Close() tarReader := tar.NewReader(tarball) - slog.Info("Copying files to disk...") + slog.InfoContext(ctx, "Copying files to disk...") sampleFound := false @@ -114,7 +114,7 @@ func copyFromSamples(ctx context.Context, dir string, names []string, skipExisti 
if !skipExisting || !os.IsExist(err) { return err } - slog.Warn(fmt.Sprintf("File already exists, skipping: %q", path)) + slog.WarnContext(ctx, fmt.Sprintf("File already exists, skipping: %q", path)) } } } diff --git a/src/pkg/cli/subscribe.go b/src/pkg/cli/subscribe.go index 3fa5796ec..f94e7d8cf 100644 --- a/src/pkg/cli/subscribe.go +++ b/src/pkg/cli/subscribe.go @@ -58,7 +58,7 @@ func WaitServiceState( // a minute and DelayBeforeRetry backs off exponentially up to 1 minute). if isTransientError(err) { if connect.CodeOf(err) == connect.CodeResourceExhausted { - slog.Warn(fmt.Sprintf("quota exceeded; will retry subscribe stream after backoff: %v", err)) + slog.WarnContext(ctx, fmt.Sprintf("quota exceeded; will retry subscribe stream after backoff: %v", err)) } else { slog.Debug(fmt.Sprintf("WaitServiceState: transient error, reconnecting subscribe stream: %v", err)) } @@ -83,7 +83,7 @@ func WaitServiceState( } } - slog.Info(fmt.Sprintf("Waiting for services to finish deploying: %q\n", pendingServices)) // TODO: don't print in Go-routine + slog.InfoContext(ctx, fmt.Sprintf("Waiting for services to finish deploying: %q\n", pendingServices)) // TODO: don't print in Go-routine if msg == nil { continue diff --git a/src/pkg/cli/tail.go b/src/pkg/cli/tail.go index 06d741ce7..102ac9d64 100644 --- a/src/pkg/cli/tail.go +++ b/src/pkg/cli/tail.go @@ -162,11 +162,11 @@ func Tail(ctx context.Context, provider client.Provider, projectName string, opt if _, err := provider.GetService(ctx, &defangv1.GetRequest{Project: projectName, Name: service}); err != nil { switch connect.CodeOf(err) { case connect.CodeNotFound: - slog.Warn(fmt.Sprintf("Service does not exist (yet): %q", service)) + slog.WarnContext(ctx, fmt.Sprintf("Service does not exist (yet): %q", service)) case connect.CodeUnknown: // Ignore unknown (nil) errors default: - slog.Warn(fmt.Sprintf("%v", err)) // TODO: use client.PrettyError(…) + slog.WarnContext(ctx, fmt.Sprintf("%v", err)) // TODO: use client.PrettyError(…) 
} } } @@ -269,7 +269,7 @@ func streamLogs(ctx context.Context, provider client.Provider, projectName strin if oldState, err := term.MakeUnbuf(int(os.Stdin.Fd())); err == nil { defer term.Restore(int(os.Stdin.Fd()), oldState) - slog.Info("Showing only build logs and runtime errors. Press V to toggle verbose mode.") + slog.InfoContext(ctx, "Showing only build logs and runtime errors. Press V to toggle verbose mode.") input := term.NewNonBlockingStdin() defer input.Close() // abort the read loop go func() { @@ -291,7 +291,7 @@ func streamLogs(ctx context.Context, provider client.Provider, projectName strin if debug { debugStr = "ON" } - slog.Info(fmt.Sprintln("Debug mode", debugStr)) + slog.InfoContext(ctx, fmt.Sprintln("Debug mode", debugStr)) track.Evt("Debug Toggled", P("debug", debug)) case 'v', 'V': verbose := !options.Verbose @@ -303,7 +303,7 @@ func streamLogs(ctx context.Context, provider client.Provider, projectName strin if toggleCount++; toggleCount == 4 && !verbose { modeStr += ". I like the way you work it, no verbosity." } - slog.Info(fmt.Sprintln("Verbose mode", modeStr)) + slog.InfoContext(ctx, fmt.Sprintln("Verbose mode", modeStr)) track.Evt("Verbose Toggled", P("verbose", verbose), P("toggleCount", toggleCount)) } } @@ -380,7 +380,7 @@ func receiveLogs(ctx context.Context, provider client.Provider, projectName stri slog.Debug(fmt.Sprintln("Disconnected:", err)) var spaces int if !options.Raw { - slog.Warn("Reconnecting...\r") + slog.WarnContext(ctx, "Reconnecting...\r") spaces = len(" ! 
Reconnecting...\r") // warnPrefix + message, used to clear the line } if err := provider.DelayBeforeRetry(ctx); err != nil { diff --git a/src/pkg/cli/tailAndMonitor.go b/src/pkg/cli/tailAndMonitor.go index 038cb0add..fbedc89eb 100644 --- a/src/pkg/cli/tailAndMonitor.go +++ b/src/pkg/cli/tailAndMonitor.go @@ -57,7 +57,7 @@ func TailAndMonitor(ctx context.Context, project *compose.Project, provider clie // When CD fails, stop WaitServiceState cancelSvcStatus(cdErr) } else { - slog.Info("Deployment complete. Waiting for services to be healthy...") + slog.InfoContext(ctx, "Deployment complete. Waiting for services to be healthy...") } }() @@ -76,7 +76,7 @@ func TailAndMonitor(ctx context.Context, project *compose.Project, provider clie slog.Debug(fmt.Sprintln("Tail while monitoring stopped with", err, errors.Unwrap(err))) if connect.CodeOf(err) == connect.CodePermissionDenied { - slog.Warn("Unable to tail logs. Waiting for the deployment to finish.") + slog.WarnContext(ctx, "Unable to tail logs. Waiting for the deployment to finish.") // If tail fails because of missing permission, we wait for the deployment to finish <-tailCtx.Done() // Get the actual error from the context so we won't print "Error: missing tail permission" @@ -88,14 +88,14 @@ func TailAndMonitor(ctx context.Context, project *compose.Project, provider clie break // an end condition was detected; cdErr and/or svcErr might be nil case errors.Is(context.Cause(ctx), context.Canceled): - slog.Warn("Deployment is not finished. Service(s) might not be running.") + slog.WarnContext(ctx, "Deployment is not finished. Service(s) might not be running.") case errors.Is(context.Cause(tailCtx), errMonitoringDone): break // the monitoring stopped the tail; cdErr and/or svcErr will have been set case errors.Is(context.Cause(ctx), context.DeadlineExceeded): // Tail was canceled when wait-timeout is reached; show a warning and exit with an error - slog.Warn("Wait-timeout exceeded, detaching from logs. 
Deployment still in progress.") + slog.WarnContext(ctx, "Wait-timeout exceeded, detaching from logs. Deployment still in progress.") fallthrough default: diff --git a/src/pkg/cli/teardown_cd.go b/src/pkg/cli/teardown_cd.go index 291a19309..aa318436e 100644 --- a/src/pkg/cli/teardown_cd.go +++ b/src/pkg/cli/teardown_cd.go @@ -34,9 +34,9 @@ func TearDownCD(ctx context.Context, provider client.Provider, force bool) error }) if len(stacks) > 0 { - slog.Info("Some stacks are currently deployed. Run the following commands to tear them down:") + slog.InfoContext(ctx, "Some stacks are currently deployed. Run the following commands to tear them down:") for _, stack := range stacks { - slog.Info(fmt.Sprintf(" `defang down --workspace %s --project-name %s --stack %s`", stack.Workspace, stack.Project, stack.Stack)) + slog.InfoContext(ctx, fmt.Sprintf(" `defang down --workspace %s --project-name %s --stack %s`", stack.Workspace, stack.Project, stack.Stack)) } if !force { return ErrExistingStacks diff --git a/src/pkg/clouds/aws/codebuild/cfn/setup.go b/src/pkg/clouds/aws/codebuild/cfn/setup.go index eeb31d4f7..1856e1503 100644 --- a/src/pkg/clouds/aws/codebuild/cfn/setup.go +++ b/src/pkg/clouds/aws/codebuild/cfn/setup.go @@ -94,7 +94,7 @@ func (a *AwsCfn) updateStackAndWait(ctx context.Context, templateBody string, fo return err // might call createStackAndWait depending on the error } - slog.Info(fmt.Sprintln("Waiting for CloudFormation stack", a.stackName, "to be updated...")) // TODO: verbose only + slog.InfoContext(ctx, fmt.Sprintln("Waiting for CloudFormation stack", a.stackName, "to be updated...")) // TODO: verbose only dso, err := cloudformation.NewStackUpdateCompleteWaiter(cfn, update1s).WaitForOutput(ctx, &cloudformation.DescribeStacksInput{ StackName: uso.StackId, }, stackTimeout) @@ -131,7 +131,7 @@ func (a *AwsCfn) createStackAndWait(ctx context.Context, templateBody string, pa } } - slog.Info(fmt.Sprintln("Waiting for CloudFormation stack", a.stackName, "to be 
created...")) // TODO: verbose only + slog.InfoContext(ctx, fmt.Sprintln("Waiting for CloudFormation stack", a.stackName, "to be created...")) // TODO: verbose only dso, err := cloudformation.NewStackCreateCompleteWaiter(cfn, create1s).WaitForOutput(ctx, &cloudformation.DescribeStacksInput{ StackName: ptr.String(a.stackName), }, stackTimeout) @@ -252,7 +252,7 @@ func (a *AwsCfn) TearDown(ctx context.Context) error { StackName: ptr.String(a.stackName), EnableTerminationProtection: ptr.Bool(false), }); err != nil { - slog.Warn(fmt.Sprintf("Failed to disable termination protection for CloudFormation stack %s: %v\n", a.stackName, err)) + slog.WarnContext(ctx, fmt.Sprintf("Failed to disable termination protection for CloudFormation stack %s: %v\n", a.stackName, err)) } _, err = cfn.DeleteStack(ctx, &cloudformation.DeleteStackInput{ StackName: ptr.String(a.stackName), @@ -262,7 +262,7 @@ func (a *AwsCfn) TearDown(ctx context.Context) error { return err } - slog.Info(fmt.Sprintln("Waiting for CloudFormation stack", a.stackName, "to be deleted...")) // TODO: verbose only + slog.InfoContext(ctx, fmt.Sprintln("Waiting for CloudFormation stack", a.stackName, "to be deleted...")) // TODO: verbose only return cloudformation.NewStackDeleteCompleteWaiter(cfn, delete1s).Wait(ctx, &cloudformation.DescribeStacksInput{ StackName: ptr.String(a.stackName), }, stackTimeout) diff --git a/src/pkg/clouds/aws/login.go b/src/pkg/clouds/aws/login.go index 5383fd2f1..55e917847 100644 --- a/src/pkg/clouds/aws/login.go +++ b/src/pkg/clouds/aws/login.go @@ -86,7 +86,7 @@ func (p *awsOAuthCredentialsProvider) Retrieve(ctx context.Context) (awssdk.Cred return awssdk.Credentials{}, fmt.Errorf("marshaling refreshed token: %w", err) } if err := p.tokenStore.Save(p.storeKey, string(tokenBytes)); err != nil { - slog.Warn(fmt.Sprintf("failed to persist refreshed AWS OAuth token: %v", err)) + slog.WarnContext(ctx, fmt.Sprintf("failed to persist refreshed AWS OAuth token: %v", err)) } else { 
slog.Debug(fmt.Sprintf("persisted refreshed AWS OAuth token for %q", p.storeKey)) } @@ -154,7 +154,7 @@ func (a *Aws) Authenticate(ctx context.Context, interactive bool) error { if !interactive { return errors.New("no valid AWS credentials found") // TODO: Better error message with possible doc link } - slog.Info("no valid credentials found, starting interactive login...") + slog.InfoContext(ctx, "no valid credentials found, starting interactive login...") creds, err := a.tryInteractiveLogin(ctx, 3) if err != nil { return err @@ -180,7 +180,7 @@ func (a *Aws) tryInteractiveLogin(ctx context.Context, n int) (awssdk.Credential sum := sha256.Sum256([]byte(cached.LoginSession)) storeKey = fmt.Sprintf("%s%x", tokenStoreKeyPrefix, sum) if err := a.TokenStore.Save(storeKey, string(tokenBytes)); err != nil { - slog.Warn(fmt.Sprintf("failed to save AWS OAuth token: %v", err)) + slog.WarnContext(ctx, fmt.Sprintf("failed to save AWS OAuth token: %v", err)) } } @@ -188,7 +188,7 @@ func (a *Aws) tryInteractiveLogin(ctx context.Context, n int) (awssdk.Credential creds, err := a.testCredentialsWithProfile(ctx, storeKey, provider) if err != nil { - slog.Warn(fmt.Sprintf("Cannot use login credentials: %v, please try again.", err)) + slog.WarnContext(ctx, fmt.Sprintf("Cannot use login credentials: %v, please try again.", err)) continue } return creds, nil @@ -257,13 +257,13 @@ func (a *Aws) testCredentialsWithProfile(ctx context.Context, name string, creds // If the stack/env specifies an AWS_PROFILE with role, try assume the role roleArn, profile, err := a.GetStackAwsProfileRoleArn(ctx) if err != nil { - slog.Warn(fmt.Sprintf("failed to get AWS_PROFILE role ARN: %v", err)) + slog.WarnContext(ctx, fmt.Sprintf("failed to get AWS_PROFILE role ARN: %v", err)) } else if profile == "" { - slog.Warn("AWS_PROFILE environment variable is not set, skipping AWS_PROFILE role validation") + slog.WarnContext(ctx, "AWS_PROFILE environment variable is not set, skipping AWS_PROFILE role 
validation") } else if roleArn != "" { same, err := sameRole(*identity.Arn, roleArn) if err != nil { - slog.Warn(fmt.Sprintf("failed to compare token identity with AWS_PROFILE role: %v", err)) + slog.WarnContext(ctx, fmt.Sprintf("failed to compare token identity with AWS_PROFILE role: %v", err)) } else if same { slog.Debug(fmt.Sprintf("token %q identity %q matches AWS_PROFILE role %q", name, *identity.Arn, roleArn)) return creds, nil @@ -289,7 +289,7 @@ func (a *Aws) testCredentialsWithProfile(ctx context.Context, name string, creds return nil, fmt.Errorf("login successful, but does not have access to role %q in used by stack aws profile %q; token account %v does not match stack aws profile account %v", roleArn, profile, *identity.Account, parsedArn.AccountID) } // If cannot assume but it's the same account, we assume its a valid token - slog.Warn(fmt.Sprintf("login successful for AWS account %v which is same as the account specified by stack aws profile %q, assume its valid", *identity.Account, profile)) + slog.WarnContext(ctx, fmt.Sprintf("login successful for AWS account %v which is same as the account specified by stack aws profile %q, assume its valid", *identity.Account, profile)) return creds, nil } // If able to assume the profile role, use the assumed role credentials @@ -339,7 +339,7 @@ func (a *Aws) InteractiveLogin(ctx context.Context) (*awsTokenCache, error) { port := "8080" // default port if parsing fails parsed, err := url.Parse(redirectURL) if err != nil { - slog.Warn(fmt.Sprintf("failed to parse redirect URL %q, assume port 8080: %v", redirectURL, err)) + slog.WarnContext(ctx, fmt.Sprintf("failed to parse redirect URL %q, assume port 8080: %v", redirectURL, err)) } else { port = parsed.Port() } diff --git a/src/pkg/clouds/gcp/api.go b/src/pkg/clouds/gcp/api.go index 0f80a930c..059f694c9 100644 --- a/src/pkg/clouds/gcp/api.go +++ b/src/pkg/clouds/gcp/api.go @@ -39,7 +39,7 @@ func (gcp Gcp) EnsureAPIsEnabled(ctx context.Context, apis ...string) 
error { if errors.As(err, &apiErr) && (apiErr.Code == 403 || apiErr.Code == 401) { return fmt.Errorf("permission denied when enabling services: %w", err) } - slog.Error(fmt.Sprintf("Error: %+v (%T)", err, err)) + slog.ErrorContext(ctx, fmt.Sprintf("Error: %+v (%T)", err, err)) if i < maxAttempts-1 { slog.Debug(fmt.Sprintf("Failed to enable services, will retry in %v: %v\n", retryInterval, err)) if err := pkg.SleepWithContext(ctx, retryInterval); err != nil { @@ -54,7 +54,7 @@ func (gcp Gcp) EnsureAPIsEnabled(ctx context.Context, apis ...string) error { for { op, err := opService.Get(operation.Name).Context(ctx).Do() if err != nil { - slog.Warn(fmt.Sprintf("Failed to get operation status: %v\n", err)) + slog.WarnContext(ctx, fmt.Sprintf("Failed to get operation status: %v\n", err)) } else if op.Done { // Check if the operation is done if op.Error != nil { if i < maxAttempts-1 { diff --git a/src/pkg/clouds/gcp/iam.go b/src/pkg/clouds/gcp/iam.go index d14a12811..0a54fe326 100644 --- a/src/pkg/clouds/gcp/iam.go +++ b/src/pkg/clouds/gcp/iam.go @@ -45,7 +45,7 @@ func (gcp Gcp) EnsureRoleExists(ctx context.Context, roleId, title, description role.Title = title role.Description = description role.Stage = iamadmpb.Role_GA - slog.Info("Updating role " + roleId) + slog.InfoContext(ctx, "Updating role "+roleId) if _, err := client.UpdateRole(ctx, &iamadmpb.UpdateRoleRequest{Name: roleName, Role: role}); err != nil { return "", fmt.Errorf("failed to update role: %w", err) } @@ -63,7 +63,7 @@ func (gcp Gcp) EnsureRoleExists(ctx context.Context, roleId, title, description Stage: iamadmpb.Role_GA, // TODO: investigate stage }, } - slog.Info("Creating role " + roleId) + slog.InfoContext(ctx, "Creating role "+roleId) role, err = client.CreateRole(ctx, req) if err != nil { return "", fmt.Errorf("failed to create role: %w", err) @@ -108,7 +108,7 @@ func (gcp Gcp) EnsureServiceAccountExists(ctx context.Context, serviceAccountId, account.DisplayName = displayName account.Description = 
description - slog.Info("Updating service account " + serviceAccountId) + slog.InfoContext(ctx, "Updating service account "+serviceAccountId) if _, err := client.UpdateServiceAccount(ctx, &iamadmpb.ServiceAccount{Name: account.Name, DisplayName: displayName, Description: description}); err != nil { return "", fmt.Errorf("failed to update service account: %w", err) } @@ -124,7 +124,7 @@ func (gcp Gcp) EnsureServiceAccountExists(ctx context.Context, serviceAccountId, }, Name: "projects/" + gcp.ProjectId, } - slog.Info("Creating service account " + serviceAccountId) + slog.InfoContext(ctx, "Creating service account "+serviceAccountId) account, err := client.CreateServiceAccount(ctx, req) if err != nil { return "", fmt.Errorf("failed to create service account: %w", err) @@ -192,11 +192,11 @@ func (gcp Gcp) EnsurePrincipalHasBucketRoles(ctx context.Context, bucketName, pr return nil } - slog.Info(fmt.Sprintf("Updating IAM policy for principal %s on bucket %s", principal, bucketName)) + slog.InfoContext(ctx, fmt.Sprintf("Updating IAM policy for principal %s on bucket %s", principal, bucketName)) for i := range maxAttempts { // Service account might not be visible for a few seconds after creation for policy attachment if err := bucket.IAM().SetPolicy(ctx, policy); err != nil { if i < maxAttempts-1 { - slog.Info(fmt.Sprintf("Failed to set IAM policy, will retry in %v: %v\n", retryInterval, err)) + slog.InfoContext(ctx, fmt.Sprintf("Failed to set IAM policy, will retry in %v: %v\n", retryInterval, err)) if err := pkg.SleepWithContext(ctx, retryInterval); err != nil { return err } @@ -269,14 +269,14 @@ func (gcp Gcp) EnsurePrincipalHasServiceAccountRoles(ctx context.Context, princi return nil } - slog.Info(fmt.Sprintf("Updating IAM policy for %s on service account %s", principal, serviceAccount)) + slog.InfoContext(ctx, fmt.Sprintf("Updating IAM policy for %s on service account %s", principal, serviceAccount)) for i := range maxAttempts { // Service account might not be 
visible for a few seconds after creation for policy attachment if _, err := client.SetIamPolicy(ctx, &iamadm.SetIamPolicyRequest{ Resource: resource, Policy: policy, }); err != nil { if i < maxAttempts-1 { - slog.Info(fmt.Sprintf("Failed to set IAM policy for service account %s, will retry in %v: %v\n", serviceAccount, retryInterval, err)) + slog.InfoContext(ctx, fmt.Sprintf("Failed to set IAM policy for service account %s, will retry in %v: %v\n", serviceAccount, retryInterval, err)) if err := pkg.SleepWithContext(ctx, retryInterval); err != nil { return err } @@ -348,7 +348,7 @@ func ensurePrincipalHasRolesWithResource(ctx context.Context, client resourceWit slog.Debug(fmt.Sprintf("%s already has roles %v on resource %s", principal, roles, resource)) return nil } - slog.Info("Updating IAM policy for resource " + resource) + slog.InfoContext(ctx, "Updating IAM policy for resource "+resource) for i := range maxAttempts { // Service account might not be visible for a few seconds after creation for policy attachment if _, err := client.SetIamPolicy(ctx, &iampb.SetIamPolicyRequest{Resource: resource, Policy: policy}); err != nil { diff --git a/src/pkg/clouds/gcp/login.go b/src/pkg/clouds/gcp/login.go index ca0afe5e0..5613e13d8 100644 --- a/src/pkg/clouds/gcp/login.go +++ b/src/pkg/clouds/gcp/login.go @@ -128,11 +128,11 @@ func (gcp *Gcp) Authenticate(ctx context.Context, interactive bool) error { if ctx.Err() != nil { // Fast fail if context is done, no need to try other credential sources return ctx.Err() } - slog.Warn(fmt.Sprintf("failed to get GitHub Actions OIDC token source: %v", err)) + slog.WarnContext(ctx, fmt.Sprintf("failed to get GitHub Actions OIDC token source: %v", err)) } else if tokenSource != nil { slog.Debug("found GitHub Actions OIDC token source, testing permissions...") if err := testTokenProjectPermissions(ctx, gcp.ProjectId, requiredPerms, tokenSource); err != nil { - slog.Warn(fmt.Sprintf("GitHub Actions OIDC token is missing required 
permissions on project %q: %v\nPlease ensure your workload identity provider and github actions permissions are set up correctly: https://docs.defang.com/defang-byoc/gcp/github-actions\n", gcp.ProjectId, err)) + slog.WarnContext(ctx, fmt.Sprintf("GitHub Actions OIDC token is missing required permissions on project %q: %v\nPlease ensure your workload identity provider and github actions permissions are set up correctly: https://docs.defang.com/defang-byoc/gcp/github-actions\n", gcp.ProjectId, err)) } else { slog.Debug("GitHub Actions OIDC token has required permissions") gcp.Options = append(gcp.Options, option.WithTokenSource(tokenSource)) @@ -147,7 +147,7 @@ func (gcp *Gcp) Authenticate(ctx context.Context, interactive bool) error { if ctx.Err() != nil { // Fast fail if context is done, no need to try other credential sources return ctx.Err() } - slog.Warn(fmt.Sprintf("failed to load stored credentials: %v", err)) + slog.WarnContext(ctx, fmt.Sprintf("failed to load stored credentials: %v", err)) } else if tokenSource != nil { slog.Debug("found valid stored credentials with required permissions") gcp.Options = append(gcp.Options, option.WithTokenSource(tokenSource)) @@ -171,11 +171,11 @@ func (gcp *Gcp) tryInteractiveLogin(ctx context.Context, n int) error { } if err := testTokenProjectPermissions(ctx, gcp.ProjectId, requiredPerms, tokenSource); err != nil { if errors.As(err, &ErrorMissingPermissions{}) { - slog.Warn(fmt.Sprintf("Token from interactive login is missing required permissions on project %q: %v\nPlease ensure your user has the following permissions: %v\n", gcp.ProjectId, err, requiredPerms)) + slog.WarnContext(ctx, fmt.Sprintf("Token from interactive login is missing required permissions on project %q: %v\nPlease ensure your user has the following permissions: %v\n", gcp.ProjectId, err, requiredPerms)) } else { - slog.Warn(fmt.Sprintf("Failed to validate token from interactive login on project %q: %v\n", gcp.ProjectId, err)) + slog.WarnContext(ctx, 
fmt.Sprintf("Failed to validate token from interactive login on project %q: %v\n", gcp.ProjectId, err)) } - slog.Warn("Please try logging in again with an account that has the required permissions.") + slog.WarnContext(ctx, "Please try logging in again with an account that has the required permissions.") continue } gcp.Options = append(gcp.Options, option.WithTokenSource(tokenSource)) @@ -193,7 +193,7 @@ func (gcp *Gcp) tryInteractiveLogin(ctx context.Context, n int) error { return fmt.Errorf("failed to marshal token: %w", err) } if gcp.TokenStore == nil { - slog.Warn("No token store configured, skipping persisting token") + slog.WarnContext(ctx, "No token store configured, skipping persisting token") return nil } if err := gcp.TokenStore.Save(tokenName, string(bytes)); err != nil { @@ -222,12 +222,12 @@ func (gcp *Gcp) findStoredCredentials(ctx context.Context) (oauth2.TokenSource, for _, name := range oauthTokenNames { tokenJson, err := gcp.TokenStore.Load(name) if err != nil { - slog.Warn(fmt.Sprintf("failed to load previously saved auth token %q: %v", name, err)) + slog.WarnContext(ctx, fmt.Sprintf("failed to load previously saved auth token %q: %v", name, err)) continue } var token oauth2.Token if err = json.Unmarshal([]byte(tokenJson), &token); err != nil { - slog.Warn(fmt.Sprintf("failed to parse previously saved auth token %q: %v", name, err)) + slog.WarnContext(ctx, fmt.Sprintf("failed to parse previously saved auth token %q: %v", name, err)) continue } slog.Debug(fmt.Sprintf("Testing token %q from store for required permissions...", name)) diff --git a/src/pkg/clouds/gcp/storage.go b/src/pkg/clouds/gcp/storage.go index cd6a59ff5..eea8d70b0 100644 --- a/src/pkg/clouds/gcp/storage.go +++ b/src/pkg/clouds/gcp/storage.go @@ -53,7 +53,7 @@ func (gcp Gcp) EnsureBucketExists(ctx context.Context, prefix string, versioning defer client.Close() newBucketName := fmt.Sprintf("%s-%s", prefix, pkg.RandomID()) - slog.Info(fmt.Sprintf("Creating defang cd bucket %q", 
newBucketName)) + slog.InfoContext(ctx, fmt.Sprintf("Creating defang cd bucket %q", newBucketName)) bucket := client.Bucket(newBucketName) if err := bucket.Create(ctx, gcp.ProjectId, &storage.BucketAttrs{ diff --git a/src/pkg/dockerhub/dockerhub.go b/src/pkg/dockerhub/dockerhub.go index e044b6c08..0fcd4ebb6 100644 --- a/src/pkg/dockerhub/dockerhub.go +++ b/src/pkg/dockerhub/dockerhub.go @@ -79,7 +79,7 @@ func GenerateNewPublicOnlyPAT(ctx context.Context, label string) (string, string } pat, err = docHubClient.CreatePAT(ctx, label, []string{"repo:public_read"}) if err != nil { - slog.Info(fmt.Sprintf("Failed to create Docker Hub PAT, fallback to existing docker credentials: %v", err)) + slog.InfoContext(ctx, fmt.Sprintf("Failed to create Docker Hub PAT, fallback to existing docker credentials: %v", err)) // Fallback to use the password as PAT pat = password } diff --git a/src/pkg/login/agree_tos.go b/src/pkg/login/agree_tos.go index cd4997906..2a0a406c4 100644 --- a/src/pkg/login/agree_tos.go +++ b/src/pkg/login/agree_tos.go @@ -62,6 +62,6 @@ func nonInteractiveAgreeToS(ctx context.Context, fabric client.FabricClient) err if err := fabric.AgreeToS(ctx); err != nil { return err } - slog.Info("You have agreed to the Defang terms of service") + slog.InfoContext(ctx, "You have agreed to the Defang terms of service") return nil } diff --git a/src/pkg/login/login.go b/src/pkg/login/login.go index 326131aa9..ab7555bb1 100644 --- a/src/pkg/login/login.go +++ b/src/pkg/login/login.go @@ -61,7 +61,7 @@ func interactiveLogin(ctx context.Context, fabricAddr string, flow LoginFlow, mc } if err := client.SaveAccessToken(fabricAddr, token); err != nil { - slog.Warn(fmt.Sprintf("%v", err)) + slog.WarnContext(ctx, fmt.Sprintf("%v", err)) var pathError *os.PathError if errors.As(err, &pathError) { term.Printf("\nTo fix file permissions, run:\n\n sudo chown -R $(whoami) %q\n", pathError.Path) @@ -131,7 +131,7 @@ func InteractiveRequireLoginAndToS(ctx context.Context, fabric 
client.FabricClie // Login interactively now; only do this for authorization-related errors if connect.CodeOf(err) == connect.CodeUnauthenticated { slog.Debug(fmt.Sprintln("Server error:", err)) - slog.Warn("Please log in to continue.") + slog.WarnContext(ctx, "Please log in to continue.") term.ResetWarnings() // clear any previous warnings so we don't show them again defer func() { track.Cmd(nil, "Login", P("reason", err)) }() @@ -154,7 +154,7 @@ func InteractiveRequireLoginAndToS(ctx context.Context, fabric client.FabricClie // Check if the user has agreed to the terms of service and show a prompt if needed if connect.CodeOf(err) == connect.CodeFailedPrecondition { - slog.Warn(fmt.Sprintf("%v", client.PrettyError(err))) + slog.WarnContext(ctx, fmt.Sprintf("%v", client.PrettyError(err))) defer func() { track.Cmd(nil, "Terms", P("reason", err)) }() if err = InteractiveAgreeToS(ctx, fabric); err != nil { diff --git a/src/pkg/mcp/mcp_server.go b/src/pkg/mcp/mcp_server.go index 6b3481c1a..b2f65bc0f 100644 --- a/src/pkg/mcp/mcp_server.go +++ b/src/pkg/mcp/mcp_server.go @@ -34,7 +34,7 @@ func (t *ToolTracker) TrackTool(name string, handler server.ToolHandlerFunc) ser track.Evt("MCP Tool Called", track.P("tool", name), track.P("client", t.client), track.P("cluster", t.fabricAddr), track.P("provider", *t.providerId)) resp, err := handler(ctx, request) if err != nil { - slog.Error(fmt.Sprintln("MCP Tool Failed: "+name, "error", err)) + slog.ErrorContext(ctx, fmt.Sprintln("MCP Tool Failed: "+name, "error", err)) } else { slog.Debug("MCP Tool Succeeded: " + name) } diff --git a/src/pkg/mcp/resources/resources.go b/src/pkg/mcp/resources/resources.go index e141eb226..a0c66b74b 100644 --- a/src/pkg/mcp/resources/resources.go +++ b/src/pkg/mcp/resources/resources.go @@ -38,7 +38,7 @@ func setupDocumentationResource(s *server.MCPServer) { // Read the file file, err := os.ReadFile(knowledgeBasePath) if err != nil { - slog.Error(fmt.Sprintln("Failed to read resource file", "error", 
err, "path", "knowledge_base.json")) + slog.ErrorContext(ctx, fmt.Sprintln("Failed to read resource file", "error", err, "path", "knowledge_base.json")) return nil, fmt.Errorf("failed to read resource file knowledge_base.json: %w", err) } @@ -68,7 +68,7 @@ func setupSamplesResource(s *server.MCPServer) { // Read the file file, err := os.ReadFile(samplesExamplesPath) if err != nil { - slog.Error(fmt.Sprintln("Failed to read resource file", "error", err, "path", "samples_examples.json")) + slog.ErrorContext(ctx, fmt.Sprintln("Failed to read resource file", "error", err, "path", "samples_examples.json")) return nil, fmt.Errorf("failed to read resource file samples_examples.json: %w", err) } diff --git a/src/pkg/migrate/heroku.go b/src/pkg/migrate/heroku.go index 29d47d2c7..9390475f6 100644 --- a/src/pkg/migrate/heroku.go +++ b/src/pkg/migrate/heroku.go @@ -30,7 +30,7 @@ type HerokuApplicationInfo struct { func collectHerokuApplicationInfo(ctx context.Context, client HerokuClientInterface, appName string) (HerokuApplicationInfo, error) { var applicationInfo HerokuApplicationInfo - slog.Info("Identifying deployed dynos") + slog.InfoContext(ctx, "Identifying deployed dynos") dynos, err := client.ListDynos(ctx, appName) if err != nil { return HerokuApplicationInfo{}, fmt.Errorf("failed to list dynos: %w", err) @@ -59,7 +59,7 @@ func collectHerokuApplicationInfo(ctx context.Context, client HerokuClientInterf applicationInfo.ReleaseTasks = releaseTasks slog.Debug(fmt.Sprintf("Release tasks for the selected application: %+v\n", releaseTasks)) - slog.Info("Identifying configured addons") + slog.InfoContext(ctx, "Identifying configured addons") addons, err := client.ListAddons(ctx, appName) if err != nil { return HerokuApplicationInfo{}, fmt.Errorf("failed to list Heroku addons: %w", err) diff --git a/src/pkg/migrate/migrate.go b/src/pkg/migrate/migrate.go index 0a2d096fb..4050aee6c 100644 --- a/src/pkg/migrate/migrate.go +++ b/src/pkg/migrate/migrate.go @@ -68,7 +68,7 @@ func 
setupFromHeroku(ctx context.Context, fabric client.FabricClient, surveyor s return "", fmt.Errorf("failed to select source application: %w", err) } - slog.Info(fmt.Sprintf("Collecting information about %q...", sourceApp)) + slog.InfoContext(ctx, fmt.Sprintf("Collecting information about %q...", sourceApp)) applicationInfo, err := collectHerokuApplicationInfo(ctx, herokuClient, sourceApp) if err != nil { @@ -84,7 +84,7 @@ func setupFromHeroku(ctx context.Context, fabric client.FabricClient, surveyor s slog.Debug(fmt.Sprintf("Sanitized application info: %+v\n", sanitizedApplicationInfo)) - slog.Info("Generating compose file...") + slog.InfoContext(ctx, "Generating compose file...") composeFileContents, err := generateComposeFile(ctx, fabric, defangv1.SourcePlatform_SOURCE_PLATFORM_HEROKU, sourceApp, sanitizedApplicationInfo) if err != nil { diff --git a/src/pkg/session/session.go b/src/pkg/session/session.go index ed396eed4..81d313591 100644 --- a/src/pkg/session/session.go +++ b/src/pkg/session/session.go @@ -67,7 +67,7 @@ func (sl *SessionLoader) LoadSession(ctx context.Context) (*Session, error) { if stack.Provider == client.ProviderDefang { extraMsg = "; consider using BYOC (https://s.defang.io/byoc)" } - slog.Info(fmt.Sprintf("Using the %q stack on %s from %s%s", stack.Name, stack.Provider, whence, extraMsg)) + slog.InfoContext(ctx, fmt.Sprintf("Using the %q stack on %s from %s%s", stack.Name, stack.Provider, whence, extraMsg)) printProviderMismatchWarnings(ctx, stack.Provider) return session, nil @@ -106,28 +106,28 @@ func printProviderMismatchWarnings(ctx context.Context, provider client.Provider // Ignore any env vars when explicitly using the Defang playground provider // Defaults to defang provider in non-interactive mode if env := pkg.AwsInEnv(); env != "" { - slog.Warn(fmt.Sprintf("AWS environment variables were detected (%v); did you forget --provider=aws or DEFANG_PROVIDER=aws?", env)) + slog.WarnContext(ctx, fmt.Sprintf("AWS environment variables were 
detected (%v); did you forget --provider=aws or DEFANG_PROVIDER=aws?", env)) } if env := pkg.DoInEnv(); env != "" { - slog.Warn(fmt.Sprintf("DigitalOcean environment variable was detected (%v); did you forget --provider=digitalocean or DEFANG_PROVIDER=digitalocean?", env)) + slog.WarnContext(ctx, fmt.Sprintf("DigitalOcean environment variable was detected (%v); did you forget --provider=digitalocean or DEFANG_PROVIDER=digitalocean?", env)) } if env := pkg.GcpInEnv(); env != "" { - slog.Warn(fmt.Sprintf("GCP project environment variable was detected (%v); did you forget --provider=gcp or DEFANG_PROVIDER=gcp?", env)) + slog.WarnContext(ctx, fmt.Sprintf("GCP project environment variable was detected (%v); did you forget --provider=gcp or DEFANG_PROVIDER=gcp?", env)) } } switch provider { case client.ProviderAWS: if !awsInConfig(ctx) { - slog.Warn("AWS provider was selected, but AWS environment is not set") + slog.WarnContext(ctx, "AWS provider was selected, but AWS environment is not set") } case client.ProviderDO: if env := pkg.DoInEnv(); env == "" { - slog.Warn("DigitalOcean provider was selected, but DIGITALOCEAN_TOKEN environment variable is not set") + slog.WarnContext(ctx, "DigitalOcean provider was selected, but DIGITALOCEAN_TOKEN environment variable is not set") } case client.ProviderGCP: if env := pkg.GcpInEnv(); env == "" { - slog.Warn(fmt.Sprintf("GCP provider was selected, but no GCP project environment variable is set (%v)", pkg.GCPProjectEnvVars)) + slog.WarnContext(ctx, fmt.Sprintf("GCP provider was selected, but no GCP project environment variable is set (%v)", pkg.GCPProjectEnvVars)) } } } diff --git a/src/pkg/setup/setup.go b/src/pkg/setup/setup.go index 75f19cc34..d0264c4c1 100644 --- a/src/pkg/setup/setup.go +++ b/src/pkg/setup/setup.go @@ -145,7 +145,7 @@ func (s *SetupClient) AIGenerate(ctx context.Context) (SetupResult, error) { track.Evt(GenerateStartedEvt, P("language", prompt.Language), P("description", prompt.Description), P("folder", 
folder), P("model", prompt.ModelID)) beforeGenerate(folder) - slog.Info("Working on it. This may take 1 or 2 minutes...") + slog.InfoContext(ctx, "Working on it. This may take 1 or 2 minutes...") args := cli.GenerateArgs{ Description: prompt.Description, Folder: folder, @@ -181,7 +181,7 @@ func (s *SetupClient) CloneSample(ctx context.Context, sample string) (SetupResu } track.Evt(GenerateStartedEvt, P("sample", sample), P("folder", folder)) beforeGenerate(folder) - slog.Info("Fetching sample from the Defang repository...") + slog.InfoContext(ctx, "Fetching sample from the Defang repository...") err = cli.InitFromSamples(ctx, folder, []string{sample}) if err != nil { return SetupResult{}, err @@ -232,7 +232,7 @@ func (s *SetupClient) MigrateFromHeroku(ctx context.Context) (SetupResult, error return SetupResult{}, err } - slog.Info("Ok, let's create a compose file for your existing deployment.") + slog.InfoContext(ctx, "Ok, let's create a compose file for your existing deployment.") heroku := migrate.NewHerokuClient() composeFileContents, err := migrate.InteractiveSetup(ctx, s.Fabric, s.Surveyor, heroku, migrate.SourcePlatformHeroku) if err != nil { @@ -244,9 +244,9 @@ func (s *SetupClient) MigrateFromHeroku(ctx context.Context) (SetupResult, error return SetupResult{}, fmt.Errorf("failed to write compose file: %w", err) } - slog.Info(fmt.Sprintln("Compose file written to", composeFilePath)) - slog.Info("Your application is now ready to deploy with Defang.") - slog.Info("For next steps, visit https://s.defang.io/from-heroku") + slog.InfoContext(ctx, fmt.Sprintln("Compose file written to", composeFilePath)) + slog.InfoContext(ctx, "Your application is now ready to deploy with Defang.") + slog.InfoContext(ctx, "For next steps, visit https://s.defang.io/from-heroku") return SetupResult{Folder: "."}, nil } diff --git a/src/pkg/stacks/manager.go b/src/pkg/stacks/manager.go index 7cd9c5f40..a4650ddc0 100644 --- a/src/pkg/stacks/manager.go +++ b/src/pkg/stacks/manager.go 
@@ -108,7 +108,7 @@ func (sm *manager) ListRemote(ctx context.Context) ([]ListItem, error) { bytes := stack.GetStackFile() params, err := NewParametersFromContent(name, bytes) if err != nil { - slog.Warn(fmt.Sprintf("Skipping invalid remote stack %s: %v\n", name, err)) + slog.WarnContext(ctx, fmt.Sprintf("Skipping invalid remote stack %s: %v\n", name, err)) continue } // fill in missing fields from remote stack info @@ -150,7 +150,7 @@ func (sm *manager) Load(ctx context.Context, name string) (*Parameters, error) { params, err := sm.LoadLocal(name) if err != nil { if errors.Is(err, os.ErrNotExist) { - slog.Info(fmt.Sprintf("stack file not found, attempting to import from previous deployments: %v", err)) + slog.InfoContext(ctx, fmt.Sprintf("stack file not found, attempting to import from previous deployments: %v", err)) return sm.GetRemote(ctx, name) } return nil, err @@ -283,7 +283,7 @@ func (sm *manager) getSpecifiedStack(ctx context.Context, name string) (*Paramet return nil, "", fmt.Errorf("failed to save imported stack %q to local directory: %w", name, err) } if stackFilename != "" { - slog.Info(fmt.Sprintf("Stack %q loaded and saved to %q. Add this file to source control.", name, stackFilename)) + slog.InfoContext(ctx, fmt.Sprintf("Stack %q loaded and saved to %q. 
Add this file to source control.", name, stackFilename)) } return stack, whence + " and previous deployment", nil } @@ -324,7 +324,7 @@ func (sm *manager) getDefaultStack(ctx context.Context) (*Parameters, string, er return nil, whence, fmt.Errorf("using default stack %q for project %q, but the stack specifies COMPOSE_PROJECT_NAME=%q", res.Stack.Name, sm.projectName, pn) } if cf, ok := params.Variables["COMPOSE_FILE"]; ok { - slog.Warn(fmt.Sprintf("Using default stack %q for project %q, but the stack specifies COMPOSE_FILE=%q", res.Stack.Name, sm.projectName, cf)) + slog.WarnContext(ctx, fmt.Sprintf("Using default stack %q for project %q, but the stack specifies COMPOSE_FILE=%q", res.Stack.Name, sm.projectName, cf)) } return params, whence, nil } From 714d556c7fec9639d21ed90e36bc870852dbf4b4 Mon Sep 17 00:00:00 2001 From: jordanstephens Date: Tue, 21 Apr 2026 17:29:39 -0700 Subject: [PATCH 6/7] fix: replace fmt.Sprintln with fmt.Sprint in slog callsites Co-Authored-By: Claude Sonnet 4.6 --- src/cmd/cli/command/commands.go | 8 +++---- src/cmd/cli/command/compose.go | 26 +++++++++++------------ src/cmd/cli/command/config.go | 2 +- src/cmd/cli/command/generate.go | 2 +- src/cmd/cli/command/session.go | 2 +- src/cmd/cli/command/whoami.go | 2 +- src/pkg/agent/tools/listConfig.go | 2 +- src/pkg/agent/tools/logs.go | 4 ++-- src/pkg/cli/cd.go | 4 ++-- src/pkg/cli/client/byoc/aws/alb_logs.go | 2 +- src/pkg/cli/client/byoc/aws/byoc.go | 16 +++++++------- src/pkg/cli/client/byoc/aws/domain.go | 6 +++--- src/pkg/cli/client/byoc/aws/list.go | 2 +- src/pkg/cli/client/byoc/baseclient.go | 2 +- src/pkg/cli/client/byoc/common.go | 2 +- src/pkg/cli/client/byoc/do/byoc.go | 4 ++-- src/pkg/cli/client/byoc/gcp/byoc.go | 4 ++-- src/pkg/cli/client/byoc/state/parse.go | 4 ++-- src/pkg/cli/client/grpc_logger.go | 2 +- src/pkg/cli/client/playground.go | 2 +- src/pkg/cli/client/pretty_error.go | 2 +- src/pkg/cli/client/projectName.go | 2 +- src/pkg/cli/compose/context.go | 12 +++++------ 
src/pkg/cli/composeUp.go | 6 +++--- src/pkg/cli/connect.go | 2 +- src/pkg/cli/logout.go | 6 +++--- src/pkg/cli/tail.go | 12 +++++------ src/pkg/cli/tailAndMonitor.go | 2 +- src/pkg/cli/token.go | 2 +- src/pkg/cli/whoami.go | 2 +- src/pkg/clouds/aws/codebuild/cfn/setup.go | 6 +++--- src/pkg/debug/debug_test.go | 2 +- src/pkg/login/agree_tos.go | 4 ++-- src/pkg/login/login.go | 4 ++-- src/pkg/mcp/mcp_server.go | 2 +- src/pkg/mcp/resources/resources.go | 4 ++-- src/pkg/mcp/utils.go | 12 +++++------ src/pkg/setup/setup.go | 2 +- src/pkg/tokenstore/store.go | 4 ++-- 39 files changed, 93 insertions(+), 93 deletions(-) diff --git a/src/cmd/cli/command/commands.go b/src/cmd/cli/command/commands.go index 46a36857a..efe1f7545 100644 --- a/src/cmd/cli/command/commands.go +++ b/src/cmd/cli/command/commands.go @@ -50,7 +50,7 @@ func Execute(ctx context.Context) error { if err := RootCmd.ExecuteContext(ctx); err != nil { if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) { - slog.ErrorContext(ctx, fmt.Sprintln("Error:", client.PrettyError(err))) + slog.ErrorContext(ctx, fmt.Sprint("Error:", client.PrettyError(err))) track.Evt("CLI Error", P("err", err)) } @@ -107,7 +107,7 @@ func Execute(ctx context.Context) error { if global.HasTty && !global.HideUpdate && pkg.RandomIndex(10) == 0 { if latest, err := github.GetLatestReleaseTag(ctx); err == nil && isNewer(GetCurrentVersion(), latest) { - slog.Debug(fmt.Sprintln("Latest Version:", latest, "Current Version:", GetCurrentVersion())) + slog.Debug("Newer version", "github", latest, "current", GetCurrentVersion()) fmt.Println("A newer version of the CLI is available at https://github.com/DefangLabs/defang/releases/latest") if pkg.RandomIndex(10) == 0 && !pkg.GetenvBool("DEFANG_HIDE_HINTS") { fmt.Println("To silence these notices, do: export DEFANG_HIDE_UPDATE=1") @@ -410,14 +410,14 @@ var RootCmd = &cobra.Command{ if connect.CodeOf(err) != connect.CodeUnauthenticated { return err } - 
slog.Debug(fmt.Sprintln("Using existing token failed; continuing to allow login/ToS flow:", err)) + slog.Debug(fmt.Sprint("Using existing token failed; continuing to allow login/ToS flow:", err)) } track.Tracker = global.Client // update tracker with the real client if v, err := global.Client.GetVersions(ctx); err == nil { version := cmd.Root().Version // HACK to avoid circular dependency with RootCmd - slog.Debug(fmt.Sprintln("Fabric:", v.Fabric, "CLI:", version, "CLI-Min:", v.CliMin)) + slog.Debug(fmt.Sprint("Fabric:", v.Fabric, "CLI:", version, "CLI-Min:", v.CliMin)) if global.HasTty && isNewer(version, v.CliMin) && !isUpgradeCommand(cmd) { slog.WarnContext(ctx, "Your CLI version is outdated. Please upgrade to the latest version by running:\n\n defang upgrade\n") global.HideUpdate = true // hide the upgrade hint at the end diff --git a/src/cmd/cli/command/compose.go b/src/cmd/cli/command/compose.go index 8ea317cf1..be9ad79bb 100644 --- a/src/cmd/cli/command/compose.go +++ b/src/cmd/cli/command/compose.go @@ -116,7 +116,7 @@ func makeComposeUpCmd() *cobra.Command { Mode: session.Stack.Mode, }) if err != nil { - slog.Debug(fmt.Sprintln("Failed to create stack:", err)) + slog.Debug(fmt.Sprint("Failed to create stack:", err)) } } @@ -161,7 +161,7 @@ func makeComposeUpCmd() *cobra.Command { if deploy.Etag != "" { tailSource = "deployment ID " + deploy.Etag } - slog.InfoContext(ctx, fmt.Sprintln("Tailing logs for", tailSource, "; press Ctrl+C to detach:")) + slog.InfoContext(ctx, fmt.Sprint("Tailing logs for", tailSource, "; press Ctrl+C to detach:")) tailOptions := newTailOptionsForDeploy(session.Stack.Name, deploy.Etag, since, global.Verbose) serviceStates, err := cli.TailAndMonitor(ctx, project, session.Provider, time.Duration(waitTimeout)*time.Second, tailOptions) @@ -169,7 +169,7 @@ func makeComposeUpCmd() *cobra.Command { deploymentErr := err debugger, err := debug.NewDebugger(ctx, global.FabricAddr, session.Stack) if err != nil { - slog.WarnContext(ctx, 
fmt.Sprintln("Failed to initialize debugger:", err)) + slog.WarnContext(ctx, fmt.Sprint("Failed to initialize debugger:", err)) return deploymentErr } handleTailAndMonitorErr(ctx, deploymentErr, debugger, debug.DebugConfig{ @@ -256,7 +256,7 @@ func confirmDeployment(targetDirectory string, existingDeployments []*defangv1.D } func printExistingDeployments(existingDeployments []*defangv1.Deployment) { - slog.Info("This project was previously deployed to the following locations:") + fmt.Println("This project was previously deployed to the following locations:") deploymentStrings := make([]string, 0, len(existingDeployments)) for _, dep := range existingDeployments { var providerId client.ProviderID @@ -284,7 +284,7 @@ func confirmDeploymentToNewLocation() (bool, error) { func promptToCreateStack(ctx context.Context, targetDirectory string, params stacks.Parameters) error { if global.NonInteractive { - slog.InfoContext(ctx, "Consider creating a stack to manage your deployments.") + fmt.Println("Consider creating a stack to manage your deployments.") printDefangHint("To create a stack, do:", "stack new --name="+params.Name) return nil } @@ -311,7 +311,7 @@ func handleComposeUpErr(ctx context.Context, debugger *debug.Debugger, project * } if connect.CodeOf(originalErr) == connect.CodeResourceExhausted && strings.Contains(originalErr.Error(), "maximum number of projects") { - slog.ErrorContext(ctx, fmt.Sprintln("Error:", client.PrettyError(originalErr))) + slog.ErrorContext(ctx, fmt.Sprint("Error:", client.PrettyError(originalErr))) err := handleTooManyProjectsError(ctx, provider, originalErr) if err != nil { return originalErr @@ -323,7 +323,7 @@ func handleComposeUpErr(ctx context.Context, debugger *debug.Debugger, project * return originalErr } - slog.ErrorContext(ctx, fmt.Sprintln("Error:", client.PrettyError(originalErr))) + slog.ErrorContext(ctx, fmt.Sprint("Error:", client.PrettyError(originalErr))) return debugger.DebugDeploymentError(ctx, debug.DebugConfig{ 
Project: project, }, originalErr) @@ -332,7 +332,7 @@ func handleComposeUpErr(ctx context.Context, debugger *debug.Debugger, project * func handleTooManyProjectsError(ctx context.Context, provider client.Provider, originalErr error) error { projectName, err := provider.RemoteProjectName(ctx) if err != nil { - slog.WarnContext(ctx, fmt.Sprintln("failed to get remote project name:", err)) + slog.WarnContext(ctx, fmt.Sprint("failed to get remote project name:", err)) return originalErr } @@ -344,7 +344,7 @@ func handleTooManyProjectsError(ctx context.Context, provider client.Provider, o _, err = cli.InteractiveComposeDown(ctx, projectName, global.Client, provider) if err != nil { - slog.WarnContext(ctx, fmt.Sprintln("ComposeDown failed:", err)) + slog.WarnContext(ctx, fmt.Sprint("ComposeDown failed:", err)) printDefangHint("To deactivate a project, do:", "compose down --project-name "+projectName) return originalErr } else { @@ -444,7 +444,7 @@ func makeComposeDownCmd() *cobra.Command { return err } - slog.Info(fmt.Sprintln("Deleted services, deployment ID", deployment)) + slog.Info(fmt.Sprint("Deleted services, deployment ID", deployment)) listConfigs, err := session.Provider.ListConfig(cmd.Context(), &defangv1.ListConfigsRequest{Project: projectName}) if err == nil { @@ -521,7 +521,7 @@ func makeComposeConfigCmd() *cobra.Command { CheckAccountInfo: false, }) if err != nil { - slog.WarnContext(ctx, fmt.Sprintln("unable to load stack:", err, "- some information may not be up-to-date")) + slog.WarnContext(ctx, fmt.Sprint("unable to load stack:", err, "- some information may not be up-to-date")) sessionx = &session.Session{ Loader: configureLoaderForCommand(cmd), Provider: client.NewPlaygroundProvider(global.Client, stacks.DefaultBeta), @@ -531,7 +531,7 @@ func makeComposeConfigCmd() *cobra.Command { _, err = sessionx.Provider.AccountInfo(ctx) if err != nil { - slog.WarnContext(ctx, fmt.Sprintln("unable to connect to cloud provider:", err, "- some information may not be 
up-to-date")) + slog.WarnContext(ctx, fmt.Sprint("unable to connect to cloud provider:", err, "- some information may not be up-to-date")) } project, loadErr := sessionx.Loader.LoadProject(ctx) @@ -677,7 +677,7 @@ func handleLogsCmd(cmd *cobra.Command, args []string) error { if pkg.IsValidTime(untilTs) { rangeStr += " until " + untilTs.Format(time.RFC3339Nano) } - slog.Info(fmt.Sprintf("Showing logs%s; press Ctrl+C to stop:", rangeStr)) + fmt.Printf("Showing logs%s; press Ctrl+C to stop:\n", rangeStr) services := args if len(name) > 0 { diff --git a/src/cmd/cli/command/config.go b/src/cmd/cli/command/config.go index a742406aa..65c31d7f9 100644 --- a/src/cmd/cli/command/config.go +++ b/src/cmd/cli/command/config.go @@ -203,7 +203,7 @@ var configDeleteCmd = &cobra.Command{ } return err } - slog.Info(fmt.Sprintln("Deleted", names)) + slog.Info(fmt.Sprint("Deleted", names)) printDefangHint("To list the configs (but not their values), do:", "config ls") return nil diff --git a/src/cmd/cli/command/generate.go b/src/cmd/cli/command/generate.go index 5fe5c74a0..e9f77e97d 100644 --- a/src/cmd/cli/command/generate.go +++ b/src/cmd/cli/command/generate.go @@ -52,7 +52,7 @@ var generateCmd = &cobra.Command{ } func afterGenerate(ctx context.Context, result setup.SetupResult) { - slog.InfoContext(ctx, fmt.Sprintln("Code generated successfully in folder", result.Folder)) + slog.InfoContext(ctx, fmt.Sprint("Code generated successfully in folder", result.Folder)) editor := pkg.Getenv("DEFANG_EDITOR", "code") // TODO: should we use EDITOR env var instead? 
But won't handle terminal editors like vim cmdd := exec.Command(editor, result.Folder) err := cmdd.Start() diff --git a/src/cmd/cli/command/session.go b/src/cmd/cli/command/session.go index d7f58b4e8..2ce2caebe 100644 --- a/src/cmd/cli/command/session.go +++ b/src/cmd/cli/command/session.go @@ -167,7 +167,7 @@ func handleInvalidComposeFileErr(ctx context.Context, loadErr error) error { return loadErr } - slog.ErrorContext(ctx, fmt.Sprintln("Cannot load project:", loadErr)) + slog.ErrorContext(ctx, fmt.Sprint("Cannot load project:", loadErr)) project, err := compose.NewLoader().CreateProjectForDebug() if err != nil { return fmt.Errorf("%w; original error: %w", err, loadErr) diff --git a/src/cmd/cli/command/whoami.go b/src/cmd/cli/command/whoami.go index f4bd3654b..9364a26fd 100644 --- a/src/cmd/cli/command/whoami.go +++ b/src/cmd/cli/command/whoami.go @@ -40,7 +40,7 @@ var whoamiCmd = &cobra.Command{ userInfo, err = auth.FetchUserInfo(ctx, token) if err != nil { // Either the auth service is down, or we're using a Fabric JWT: skip workspace information - slog.WarnContext(ctx, fmt.Sprintln("Workspace information unavailable:", err)) + slog.WarnContext(ctx, fmt.Sprint("Workspace information unavailable:", err)) } } diff --git a/src/pkg/agent/tools/listConfig.go b/src/pkg/agent/tools/listConfig.go index b34ababef..e1db27553 100644 --- a/src/pkg/agent/tools/listConfig.go +++ b/src/pkg/agent/tools/listConfig.go @@ -46,7 +46,7 @@ func HandleListConfigTool(ctx context.Context, loader client.Loader, params List if err != nil { return "", fmt.Errorf("failed to load project name: %w", err) } - slog.Debug(fmt.Sprintln("Project name loaded:", projectName)) + slog.Debug(fmt.Sprint("Project name loaded:", projectName)) slog.Debug("Function invoked: cli.ConfigList") config, err := cli.ListConfig(ctx, provider, projectName) diff --git a/src/pkg/agent/tools/logs.go b/src/pkg/agent/tools/logs.go index 022b34ace..c904329e9 100644 --- a/src/pkg/agent/tools/logs.go +++ 
b/src/pkg/agent/tools/logs.go @@ -67,7 +67,7 @@ func HandleLogsTool(ctx context.Context, loader client.Loader, params LogsParams if err != nil { return "", fmt.Errorf("failed to load project name: %w", err) } - slog.Debug(fmt.Sprintln("Project name loaded:", projectName)) + slog.Debug(fmt.Sprint("Project name loaded:", projectName)) err = cli.CanIUseProvider(ctx, client, provider, projectName, 0) if err != nil { @@ -86,7 +86,7 @@ func HandleLogsTool(ctx context.Context, loader client.Loader, params LogsParams }) if err != nil { - slog.ErrorContext(ctx, fmt.Sprintln("Failed to fetch logs", "error", err)) + slog.ErrorContext(ctx, fmt.Sprint("Failed to fetch logs", "error", err)) return "", fmt.Errorf("failed to fetch logs: %w", err) } diff --git a/src/pkg/cli/cd.go b/src/pkg/cli/cd.go index 8335fe77b..2f6b46078 100644 --- a/src/pkg/cli/cd.go +++ b/src/pkg/cli/cd.go @@ -81,7 +81,7 @@ func deleteSubdomain(ctx context.Context, projectName string, provider client.Pr }) if err != nil { // This can fail when the project was deployed from a different workspace than the current one - slog.Debug(fmt.Sprintln("DeleteSubdomainZone failed:", err)) + slog.Debug(fmt.Sprint("DeleteSubdomainZone failed:", err)) if connect.CodeOf(err) == connect.CodeNotFound { slog.WarnContext(ctx, "Subdomain not found; did you mean to destroy a different project or stack?") } @@ -122,7 +122,7 @@ func TailAndWaitForCD(ctx context.Context, provider client.Provider, projectName // blocking call to tail var tailErr error if err := streamLogs(ctx, provider, projectName, tailOptions, logEntryPrintHandler); err != nil { - slog.Debug(fmt.Sprintln("Tail stopped with", err, errors.Unwrap(err))) + slog.Debug(fmt.Sprint("Tail stopped with", err, errors.Unwrap(err))) if !errors.Is(err, context.Canceled) { tailErr = err } diff --git a/src/pkg/cli/client/byoc/aws/alb_logs.go b/src/pkg/cli/client/byoc/aws/alb_logs.go index 9531facbb..8991bf925 100644 --- a/src/pkg/cli/client/byoc/aws/alb_logs.go +++ 
b/src/pkg/cli/client/byoc/aws/alb_logs.go @@ -34,7 +34,7 @@ func (b *ByocAws) fetchAndStreamAlbLogs(ctx context.Context, projectName string, if b.Prefix != "" { bucketPrefix = b.Prefix + "-" + bucketPrefix } - slog.Debug(fmt.Sprintln("Query ALB logs", bucketPrefix)) + slog.Debug(fmt.Sprint("Query ALB logs", bucketPrefix)) if len(bucketPrefix) > 31 { // HACK: AWS CD truncates the ALB name to 31 characters (because of the long Terraform suffix) bucketPrefix = bucketPrefix[:31] diff --git a/src/pkg/cli/client/byoc/aws/byoc.go b/src/pkg/cli/client/byoc/aws/byoc.go index 0ce54d996..11d7e5603 100644 --- a/src/pkg/cli/client/byoc/aws/byoc.go +++ b/src/pkg/cli/client/byoc/aws/byoc.go @@ -92,7 +92,7 @@ func AnnotateAwsError(err error) error { if err == nil { return nil } - slog.Debug(fmt.Sprintln("AWS error:", err)) + slog.Debug(fmt.Sprint("AWS error:", err)) if strings.Contains(err.Error(), "missing AWS region:") { return ErrMissingAwsRegion{err} } @@ -612,14 +612,14 @@ func (b *ByocAws) GetProjectUpdate(ctx context.Context, projectName string) (*de s3Client := aws.NewS3FromConfig(cfg) path := b.GetProjectUpdatePath(projectName) - slog.Debug(fmt.Sprintln("Getting services from bucket:", bucketName, path)) + slog.Debug(fmt.Sprint("Getting services from bucket:", bucketName, path)) getObjectOutput, err := s3Client.GetObject(ctx, &s3.GetObjectInput{ Bucket: &bucketName, Key: &path, }) if err != nil { if aws.IsS3NoSuchKeyError(err) { - slog.Debug(fmt.Sprintln("s3.GetObject:", err)) + slog.Debug(fmt.Sprint("s3.GetObject:", err)) return nil, client.ErrNotExist // no services yet } return nil, AnnotateAwsError(err) @@ -859,15 +859,15 @@ func (b *ByocAws) getLogGroupInputs(etag types.ETag, projectName, service, filte cdTail.LogStreamNames = []string{awscodebuild.GetLogStreamForBuildID(b.cdBuildId)} } groups = append(groups, cdTail) - slog.Debug(fmt.Sprintln("Query CD logs", cdTail.LogGroupARN, cdTail.LogStreamNames, filter)) + slog.Debug(fmt.Sprint("Query CD logs", 
cdTail.LogGroupARN, cdTail.LogStreamNames, filter)) } } if logType.Has(logs.LogTypeBuild) && projectName != "" { buildsTail := cw.LogGroupInput{LogGroupARN: b.makeLogGroupARN(b.StackDir(projectName, "builds")), LogEventFilterPattern: pattern} // must match logic in ecs/common.ts; TODO: filter by etag/service - slog.Debug(fmt.Sprintln("Query builds logs", buildsTail.LogGroupARN, filter)) + slog.Debug(fmt.Sprint("Query builds logs", buildsTail.LogGroupARN, filter)) groups = append(groups, buildsTail) ecsTail := cw.LogGroupInput{LogGroupARN: b.makeLogGroupARN(b.StackDir(projectName, "ecs")), LogEventFilterPattern: pattern} // must match logic in ecs/common.ts; TODO: filter by etag/service/deploymentId - slog.Debug(fmt.Sprintln("Query ecs events logs", ecsTail.LogGroupARN, filter)) + slog.Debug(fmt.Sprint("Query ecs events logs", ecsTail.LogGroupARN, filter)) groups = append(groups, ecsTail) } // Tail services @@ -876,7 +876,7 @@ func (b *ByocAws) getLogGroupInputs(etag types.ETag, projectName, service, filte if service != "" && etag != "" { servicesTail.LogStreamNamePrefix = service + "/" + service + "_" + etag } - slog.Debug(fmt.Sprintln("Query services logs", servicesTail.LogGroupARN, servicesTail.LogStreamNamePrefix, pattern)) + slog.Debug(fmt.Sprint("Query services logs", servicesTail.LogGroupARN, servicesTail.LogStreamNamePrefix, pattern)) groups = append(groups, servicesTail) } return groups @@ -934,7 +934,7 @@ func (b *ByocAws) DeleteConfig(ctx context.Context, secrets *defangv1.Secrets) e for i, name := range secrets.Names { ids[i] = b.getSecretID(secrets.Project, name) } - slog.Debug(fmt.Sprintln("Deleting parameters", ids)) + slog.Debug(fmt.Sprint("Deleting parameters", ids)) if err := b.driver.DeleteSecrets(ctx, ids...); err != nil { return AnnotateAwsError(err) } diff --git a/src/pkg/cli/client/byoc/aws/domain.go b/src/pkg/cli/client/byoc/aws/domain.go index bc7206b04..9649a565e 100644 --- a/src/pkg/cli/client/byoc/aws/domain.go +++ 
b/src/pkg/cli/client/byoc/aws/domain.go @@ -46,7 +46,7 @@ func prepareDomainDelegation(ctx context.Context, projectDomain, projectName, st slog.WarnContext(ctx, fmt.Sprintf("Failed to find existing usable delegation set: %v, creating a new one", err)) } if delegationSet != nil { - slog.Debug(fmt.Sprintln("Reusing existing usable Route53 delegation set:", *delegationSet.Id)) + slog.Debug(fmt.Sprint("Reusing existing usable Route53 delegation set:", *delegationSet.Id)) } else { delegationSet, err = createUsableDelegationSet(ctx, projectDomain, r53Client, resolverAt) if err != nil { @@ -59,7 +59,7 @@ func prepareDomainDelegation(ctx context.Context, projectDomain, projectName, st return nil, "", errors.New("no NS records found for the delegation set") // should not happen } if delegationSet.Id != nil { - slog.Debug(fmt.Sprintln("Route53 delegation set ID:", *delegationSet.Id)) + slog.Debug(fmt.Sprint("Route53 delegation set ID:", *delegationSet.Id)) delegationSetId = strings.TrimPrefix(*delegationSet.Id, "/delegationset/") } @@ -165,7 +165,7 @@ func getOrCreateDelegationSetByZones(ctx context.Context, zones []*types.HostedZ // Create or get the reusable delegation set for the existing subdomain zone delegationSet, err = aws.CreateDelegationSet(ctx, zone.Id, r53Client) if delegationSetAlreadyReusable := new(types.DelegationSetAlreadyReusable); errors.As(err, &delegationSetAlreadyReusable) { - slog.Debug(fmt.Sprintln("Route53 delegation set already created:", err)) + slog.Debug(fmt.Sprint("Route53 delegation set already created:", err)) delegationSet, err = aws.GetDelegationSetByZone(ctx, zone.Id, r53Client) } if err != nil { diff --git a/src/pkg/cli/client/byoc/aws/list.go b/src/pkg/cli/client/byoc/aws/list.go index c3ed399a0..80b36c605 100644 --- a/src/pkg/cli/client/byoc/aws/list.go +++ b/src/pkg/cli/client/byoc/aws/list.go @@ -62,7 +62,7 @@ type S3Client interface { func ListPulumiStacks(ctx context.Context, s3client S3Client, bucketName string) 
(iter.Seq[state.PulumiState], error) { prefix := `.pulumi/stacks/` // TODO: should we filter on `projectName`? - slog.Debug(fmt.Sprintln("Listing stacks in bucket:", bucketName)) + slog.Debug(fmt.Sprint("Listing stacks in bucket:", bucketName)) out, err := s3client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{ Bucket: &bucketName, Prefix: &prefix, diff --git a/src/pkg/cli/client/byoc/baseclient.go b/src/pkg/cli/client/byoc/baseclient.go index 0310056da..3cd2bb5b3 100644 --- a/src/pkg/cli/client/byoc/baseclient.go +++ b/src/pkg/cli/client/byoc/baseclient.go @@ -109,7 +109,7 @@ func (b *ByocBaseClient) RemoteProjectName(ctx context.Context) (string, error) if len(projectNames) > 1 { return "", ErrMultipleProjects{ProjectNames: projectNames} } - slog.Debug(fmt.Sprintln("Using default project:", projectNames[0])) + slog.Debug(fmt.Sprint("Using default project:", projectNames[0])) return projectNames[0], nil } diff --git a/src/pkg/cli/client/byoc/common.go b/src/pkg/cli/client/byoc/common.go index 407a2757f..aaf8c9d93 100644 --- a/src/pkg/cli/client/byoc/common.go +++ b/src/pkg/cli/client/byoc/common.go @@ -45,7 +45,7 @@ func GetPulumiBackend(stateUrl string) (string, string, error) { } func runLocalCommand(ctx context.Context, dir string, env []string, cmd ...string) error { - slog.Debug(fmt.Sprintln("Running local command `", cmd, "` in dir ", dir)) + slog.Debug(fmt.Sprint("Running local command `", cmd, "` in dir ", dir)) // TODO - use enums to define commands instead of passing strings down from the caller // #nosec G204 command := exec.CommandContext(ctx, cmd[0], cmd[1:]...) 
diff --git a/src/pkg/cli/client/byoc/do/byoc.go b/src/pkg/cli/client/byoc/do/byoc.go index 1d9857762..fdec9250a 100644 --- a/src/pkg/cli/client/byoc/do/byoc.go +++ b/src/pkg/cli/client/byoc/do/byoc.go @@ -110,7 +110,7 @@ func (b *ByocDo) GetProjectUpdate(ctx context.Context, projectName string) (*def if err != nil { if aws.IsS3NoSuchKeyError(err) { - slog.Debug(fmt.Sprintln("s3.GetObject:", err)) + slog.Debug(fmt.Sprint("s3.GetObject:", err)) return nil, client.ErrNotExist // no services yet } return nil, awsbyoc.AnnotateAwsError(err) @@ -427,7 +427,7 @@ func (b *ByocDo) QueryLogs(ctx context.Context, req *defangv1.TailRequest) (iter if deploymentID == "" || appID == "" { //Look up the CD app directly instead of relying on the etag - slog.Debug(fmt.Sprintln("Fetching app and deployment ID for app", appPlatform.CdName)) + slog.Debug(fmt.Sprint("Fetching app and deployment ID for app", appPlatform.CdName)) cdApp, err := b.getAppByName(ctx, appPlatform.CdName) if err != nil { return nil, err diff --git a/src/pkg/cli/client/byoc/gcp/byoc.go b/src/pkg/cli/client/byoc/gcp/byoc.go index bee3c4937..f907ae48f 100644 --- a/src/pkg/cli/client/byoc/gcp/byoc.go +++ b/src/pkg/cli/client/byoc/gcp/byoc.go @@ -311,7 +311,7 @@ func (b *ByocGcp) CdList(ctx context.Context, _allRegions bool) (iter.Seq[state. prefix := `.pulumi/stacks/` // TODO: should we filter on `projectName`? 
uploadSA := b.driver.GetServiceAccountEmail(DefangUploadServiceAccountName) - slog.Debug(fmt.Sprintln("Getting services from pulumi stacks bucket:", bucketName, prefix, uploadSA)) + slog.Debug(fmt.Sprint("Getting services from pulumi stacks bucket:", bucketName, prefix, uploadSA)) objLoader := func(ctx context.Context, bucket, object string) ([]byte, error) { return b.driver.GetBucketObjectWithServiceAccount(ctx, bucket, object, uploadSA) } @@ -843,7 +843,7 @@ func (b *ByocGcp) GetProjectUpdate(ctx context.Context, projectName string) (*de // Current user might not have object viewer access to the bucket, use the upload service account to get the object uploadSA := b.driver.GetServiceAccountEmail(DefangUploadServiceAccountName) - slog.Debug(fmt.Sprintln("Getting services from bucket:", bucketName, path, uploadSA)) + slog.Debug(fmt.Sprint("Getting services from bucket:", bucketName, path, uploadSA)) pbBytes, err := b.driver.GetBucketObjectWithServiceAccount(ctx, bucketName, path, uploadSA) if err != nil { slog.Debug(fmt.Sprintf("Failed to get project bucket object from bucket %q at path %q with service account %q: %v", bucketName, path, uploadSA, err)) diff --git a/src/pkg/cli/client/byoc/state/parse.go b/src/pkg/cli/client/byoc/state/parse.go index 617b753da..ee4b6480d 100644 --- a/src/pkg/cli/client/byoc/state/parse.go +++ b/src/pkg/cli/client/byoc/state/parse.go @@ -86,12 +86,12 @@ func ParsePulumiStateFile(ctx context.Context, obj BucketObj, bucket string, obj Name: path.Base(stackFile), // legacy logic to derive stack name from file name } if state.Version != 3 { - slog.Debug(fmt.Sprintln("Skipping Pulumi state with version", state.Version)) + slog.Debug(fmt.Sprint("Skipping Pulumi state with version", state.Version)) } else if len(state.Checkpoint.Latest.PendingOperations) > 0 { for _, op := range state.Checkpoint.Latest.PendingOperations { parts := strings.Split(op.Resource.Urn, "::") // prefix::project::type::resource => 
{urn:provider:stack}::{project}::{plugin:file:class}::{name} if len(parts) < 4 { - slog.Debug(fmt.Sprintln("Skipping pending operation with malformed URN:", op.Resource.Urn)) + slog.Debug(fmt.Sprint("Skipping pending operation with malformed URN:", op.Resource.Urn)) continue } stack.Pending = append(stack.Pending, parts[3]) diff --git a/src/pkg/cli/client/grpc_logger.go b/src/pkg/cli/client/grpc_logger.go index 3c2d1cc86..e38de4e58 100644 --- a/src/pkg/cli/client/grpc_logger.go +++ b/src/pkg/cli/client/grpc_logger.go @@ -39,7 +39,7 @@ func (g grpcLogger) logRequest(header http.Header, reqType, payload string) { requestId := pkg.RandomID() header.Set("X-Request-Id", requestId) - slog.Debug(fmt.Sprintln(g.prefix, requestId, reqType, payload)) + slog.Debug(fmt.Sprint(g.prefix, requestId, reqType, payload)) } func (g grpcLogger) WrapStreamingClient(next connect.StreamingClientFunc) connect.StreamingClientFunc { diff --git a/src/pkg/cli/client/playground.go b/src/pkg/cli/client/playground.go index ccbdecb62..7efeb4ecb 100644 --- a/src/pkg/cli/client/playground.go +++ b/src/pkg/cli/client/playground.go @@ -184,7 +184,7 @@ func (g *PlaygroundProvider) RemoteProjectName(ctx context.Context) (string, err if resp.Project == "" { return "", errors.New("no Playground projects found") } - slog.Debug(fmt.Sprintln("Using default Playground project: ", resp.Project)) + slog.Debug(fmt.Sprint("Using default Playground project: ", resp.Project)) return resp.Project, nil } diff --git a/src/pkg/cli/client/pretty_error.go b/src/pkg/cli/client/pretty_error.go index ba7911f7e..75d35f007 100644 --- a/src/pkg/cli/client/pretty_error.go +++ b/src/pkg/cli/client/pretty_error.go @@ -13,7 +13,7 @@ func PrettyError(err error) error { // To avoid printing the internal gRPC error code var cerr *connect.Error if errors.As(err, &cerr) { - slog.Debug(fmt.Sprintln("Server error:", cerr)) + slog.Debug(fmt.Sprint("Server error:", cerr)) err = errors.Unwrap(cerr) } if IsNetworkError(err) { diff --git 
a/src/pkg/cli/client/projectName.go b/src/pkg/cli/client/projectName.go index 09520dacb..72e452377 100644 --- a/src/pkg/cli/client/projectName.go +++ b/src/pkg/cli/client/projectName.go @@ -14,7 +14,7 @@ func LoadProjectNameWithFallback(ctx context.Context, loader Loader, provider Pr if err == nil { return projectName, nil } - slog.Debug(fmt.Sprintln("Failed to load local project:", err)) + slog.Debug(fmt.Sprint("Failed to load local project:", err)) loadErr = err } slog.Debug("Trying to get the remote project name from the provider") diff --git a/src/pkg/cli/compose/context.go b/src/pkg/cli/compose/context.go index 21726f604..256d476c4 100644 --- a/src/pkg/cli/compose/context.go +++ b/src/pkg/cli/compose/context.go @@ -220,7 +220,7 @@ func getRemoteBuildContext(ctx context.Context, provider client.Provider, projec return fmt.Sprintf("s3://cd-preview/%s%s", service, archiveType.Extension), nil } - slog.InfoContext(ctx, fmt.Sprintln("Packaging the project files for", service, "at", root)) + slog.InfoContext(ctx, fmt.Sprint("Packaging the project files for", service, "at", root)) buffer, err := createArchive(ctx, build.Context, build.Dockerfile, archiveType) if err != nil { return "", err @@ -242,7 +242,7 @@ func getRemoteBuildContext(ctx context.Context, provider client.Provider, projec panic("unexpected UploadMode value") } - slog.InfoContext(ctx, fmt.Sprintln("Uploading the project files for", service)) + slog.InfoContext(ctx, fmt.Sprint("Uploading the project files for", service)) return uploadArchive(ctx, provider, projectName, buffer, archiveType, digest) } @@ -298,7 +298,7 @@ func tryReadIgnoreFile(cwd, ignorefile string) io.ReadCloser { if err != nil { return nil } - slog.Debug(fmt.Sprintln("Reading .dockerignore file from", ignorefile)) + slog.Debug(fmt.Sprint("Reading .dockerignore file from", ignorefile)) return reader } @@ -307,7 +307,7 @@ func tryReadIgnoreFile(cwd, ignorefile string) io.ReadCloser { // Returns the filename of the written file and an 
error. func writeDefaultIgnoreFile(cwd string, dockerignore string) (string, error) { path := filepath.Join(cwd, dockerignore) - slog.Debug(fmt.Sprintln("Writing .dockerignore file to", path)) + slog.Debug(fmt.Sprint("Writing .dockerignore file to", path)) err := os.WriteFile(path, []byte(defaultDockerIgnore), 0644) if err != nil { @@ -413,7 +413,7 @@ func walkContextFolder(root, dockerfile string, writeIgnore writeIgnoreFile, fn return err } if ignore { - slog.Debug(fmt.Sprintln("Ignoring", relPath)) // TODO: avoid printing in this function + slog.Debug(fmt.Sprint("Ignoring", relPath)) // TODO: avoid printing in this function if de.IsDir() { return filepath.SkipDir } @@ -448,7 +448,7 @@ func createArchive(ctx context.Context, root string, dockerfile string, contentT doProgress := term.StdoutCanColor() && term.IsTerminal() err := walkContextFolder(root, dockerfile, writeIgnoreFileYes, func(path string, de os.DirEntry, slashPath string) error { if term.DoDebug() { - slog.Debug(fmt.Sprintln("Adding", slashPath)) + slog.Debug(fmt.Sprint("Adding", slashPath)) } else if doProgress { term.Printf("%4d %s\r", fileCount, slashPath) defer term.ClearLine() diff --git a/src/pkg/cli/composeUp.go b/src/pkg/cli/composeUp.go index a34ad6bae..5db652875 100644 --- a/src/pkg/cli/composeUp.go +++ b/src/pkg/cli/composeUp.go @@ -45,7 +45,7 @@ func checkDeploymentMode(prevMode, newMode modes.Mode) (modes.Mode, error) { switch newMode { case modes.ModeUnspecified: if prevMode != modes.ModeUnspecified { - slog.Debug(fmt.Sprintln("No deployment mode specified; using previous deployment mode:", prevMode)) + slog.Debug(fmt.Sprint("No deployment mode specified; using previous deployment mode:", prevMode)) newMode = prevMode } case modes.ModeAffordable: @@ -120,7 +120,7 @@ func ComposeUp(ctx context.Context, fabric client.FabricClient, provider client. 
Stack: provider.GetStackNameForDomain(), }) if err != nil { - slog.Debug(fmt.Sprintln("GetDelegateSubdomainZone failed:", err)) + slog.Debug(fmt.Sprint("GetDelegateSubdomainZone failed:", err)) return nil, project, errors.New("failed to get delegate domain") } @@ -209,7 +209,7 @@ func ComposeUp(ctx context.Context, fabric client.FabricClient, provider client. CdId: resp.CdId, }) if err != nil { - slog.Debug(fmt.Sprintln("Failed to record deployment:", err)) + slog.Debug(fmt.Sprint("Failed to record deployment:", err)) slog.WarnContext(ctx, "Unable to update deployment history; deployment will proceed anyway.") } diff --git a/src/pkg/cli/connect.go b/src/pkg/cli/connect.go index 46182ef34..1d4d4a72f 100644 --- a/src/pkg/cli/connect.go +++ b/src/pkg/cli/connect.go @@ -26,7 +26,7 @@ func ConnectWithTenant(ctx context.Context, fabricAddr string, requestedTenant t resp, err := grpcClient.WhoAmI(ctx) if err != nil { - slog.Debug(fmt.Sprintln("Unable to validate tenant with server:", err)) + slog.Debug(fmt.Sprint("Unable to validate tenant with server:", err)) return grpcClient, err } diff --git a/src/pkg/cli/logout.go b/src/pkg/cli/logout.go index c8d09eee0..de4bef6ee 100644 --- a/src/pkg/cli/logout.go +++ b/src/pkg/cli/logout.go @@ -19,7 +19,7 @@ func Logout(ctx context.Context, fabricClient client.FabricClient, fabricAddr st } if err := client.TokenStore.Delete(client.TokenStorageName(fabricAddr)); err != nil { - slog.WarnContext(ctx, fmt.Sprintln("Failed to remove stored token:", err)) + slog.WarnContext(ctx, fmt.Sprint("Failed to remove stored token:", err)) // Don't return the error - we still consider logout successful } @@ -27,9 +27,9 @@ func Logout(ctx context.Context, fabricClient client.FabricClient, fabricAddr st jwtFile, err := client.GetWebIdentityTokenFile(fabricAddr) if err == nil { if err := os.Remove(jwtFile); err != nil && !os.IsNotExist(err) { - slog.WarnContext(ctx, fmt.Sprintln("Failed to remove JWT token file:", err)) + slog.WarnContext(ctx, 
fmt.Sprint("Failed to remove JWT token file:", err)) } else if err == nil { - slog.Debug(fmt.Sprintln("Removed JWT token file:", jwtFile)) + slog.Debug(fmt.Sprint("Removed JWT token file:", jwtFile)) } } diff --git a/src/pkg/cli/tail.go b/src/pkg/cli/tail.go index 102ac9d64..654574690 100644 --- a/src/pkg/cli/tail.go +++ b/src/pkg/cli/tail.go @@ -242,7 +242,7 @@ func streamLogs(ctx context.Context, provider client.Provider, projectName strin Limit: options.Limit, } - slog.Debug(fmt.Sprintln("Tail request:", tailRequest)) + slog.Debug(fmt.Sprint("Tail request:", tailRequest)) logSeq, err := provider.QueryLogs(ctx, tailRequest) if err != nil { @@ -291,7 +291,7 @@ func streamLogs(ctx context.Context, provider client.Provider, projectName strin if debug { debugStr = "ON" } - slog.InfoContext(ctx, fmt.Sprintln("Debug mode", debugStr)) + slog.InfoContext(ctx, fmt.Sprint("Debug mode", debugStr)) track.Evt("Debug Toggled", P("debug", debug)) case 'v', 'V': verbose := !options.Verbose @@ -303,7 +303,7 @@ func streamLogs(ctx context.Context, provider client.Provider, projectName strin if toggleCount++; toggleCount == 4 && !verbose { modeStr += ". I like the way you work it, no verbosity." 
} - slog.InfoContext(ctx, fmt.Sprintln("Verbose mode", modeStr)) + slog.InfoContext(ctx, fmt.Sprint("Verbose mode", modeStr)) track.Evt("Verbose Toggled", P("verbose", verbose), P("toggleCount", toggleCount)) } } @@ -377,7 +377,7 @@ func receiveLogs(ctx context.Context, provider client.Provider, projectName stri // Reconnect on transient errors if isTransientError(err) { - slog.Debug(fmt.Sprintln("Disconnected:", err)) + slog.Debug(fmt.Sprint("Disconnected:", err)) var spaces int if !options.Raw { slog.WarnContext(ctx, "Reconnecting...\r") @@ -390,7 +390,7 @@ func receiveLogs(ctx context.Context, provider client.Provider, projectName stri stop() // stop the old iterator newLogSeq, err := provider.QueryLogs(ctx, tailRequest) if err != nil { - slog.Debug(fmt.Sprintln("Reconnect failed:", err)) + slog.Debug(fmt.Sprint("Reconnect failed:", err)) return err } next, stop = iter.Pull2(newLogSeq) @@ -445,7 +445,7 @@ func handleLogEntryMsgs(msg *defangv1.TailResponse, doSpinner bool, skipDuplicat err := handler(e, options, term.DefaultTerm) if err != nil { - slog.Debug(fmt.Sprintln("Ending tail loop", err)) + slog.Debug(fmt.Sprint("Ending tail loop", err)) return err } diff --git a/src/pkg/cli/tailAndMonitor.go b/src/pkg/cli/tailAndMonitor.go index fbedc89eb..68a788dd3 100644 --- a/src/pkg/cli/tailAndMonitor.go +++ b/src/pkg/cli/tailAndMonitor.go @@ -73,7 +73,7 @@ func TailAndMonitor(ctx context.Context, project *compose.Project, provider clie // blocking call to tail var tailErr error if err := Tail(tailCtx, provider, project.Name, tailOptions); err != nil { - slog.Debug(fmt.Sprintln("Tail while monitoring stopped with", err, errors.Unwrap(err))) + slog.Debug(fmt.Sprint("Tail while monitoring stopped with", err, errors.Unwrap(err))) if connect.CodeOf(err) == connect.CodePermissionDenied { slog.WarnContext(ctx, "Unable to tail logs. 
Waiting for the deployment to finish.") diff --git a/src/pkg/cli/token.go b/src/pkg/cli/token.go index db30413b4..83e160d2a 100644 --- a/src/pkg/cli/token.go +++ b/src/pkg/cli/token.go @@ -21,7 +21,7 @@ func Token(ctx context.Context, client client.FabricClient, tenant types.TenantN } code, err := auth.StartAuthCodeFlow(ctx, false, func(token string) { - slog.Debug(fmt.Sprintln("Getting access token for scope:", s)) + slog.Debug(fmt.Sprint("Getting access token for scope:", s)) }, "token-cli") if err != nil { return err diff --git a/src/pkg/cli/whoami.go b/src/pkg/cli/whoami.go index 3200212f9..e6b50df2f 100644 --- a/src/pkg/cli/whoami.go +++ b/src/pkg/cli/whoami.go @@ -46,7 +46,7 @@ func Whoami(ctx context.Context, fabric client.FabricClient, maybeProvider clien if maybeProvider != nil { // Add provider account information if err := maybeProvider.Authenticate(ctx, false); err != nil { // Do not interactively login for whoami - slog.Debug(fmt.Sprintln("Unable to authenticate provider:", err)) + slog.Debug(fmt.Sprint("Unable to authenticate provider:", err)) } account, err := maybeProvider.AccountInfo(ctx) if err == nil { diff --git a/src/pkg/clouds/aws/codebuild/cfn/setup.go b/src/pkg/clouds/aws/codebuild/cfn/setup.go index 1856e1503..db87d60f1 100644 --- a/src/pkg/clouds/aws/codebuild/cfn/setup.go +++ b/src/pkg/clouds/aws/codebuild/cfn/setup.go @@ -94,7 +94,7 @@ func (a *AwsCfn) updateStackAndWait(ctx context.Context, templateBody string, fo return err // might call createStackAndWait depending on the error } - slog.InfoContext(ctx, fmt.Sprintln("Waiting for CloudFormation stack", a.stackName, "to be updated...")) // TODO: verbose only + slog.InfoContext(ctx, fmt.Sprint("Waiting for CloudFormation stack", a.stackName, "to be updated...")) // TODO: verbose only dso, err := cloudformation.NewStackUpdateCompleteWaiter(cfn, update1s).WaitForOutput(ctx, &cloudformation.DescribeStacksInput{ StackName: uso.StackId, }, stackTimeout) @@ -131,7 +131,7 @@ func (a *AwsCfn) 
createStackAndWait(ctx context.Context, templateBody string, pa } } - slog.InfoContext(ctx, fmt.Sprintln("Waiting for CloudFormation stack", a.stackName, "to be created...")) // TODO: verbose only + slog.InfoContext(ctx, fmt.Sprint("Waiting for CloudFormation stack", a.stackName, "to be created...")) // TODO: verbose only dso, err := cloudformation.NewStackCreateCompleteWaiter(cfn, create1s).WaitForOutput(ctx, &cloudformation.DescribeStacksInput{ StackName: ptr.String(a.stackName), }, stackTimeout) @@ -262,7 +262,7 @@ func (a *AwsCfn) TearDown(ctx context.Context) error { return err } - slog.InfoContext(ctx, fmt.Sprintln("Waiting for CloudFormation stack", a.stackName, "to be deleted...")) // TODO: verbose only + slog.InfoContext(ctx, fmt.Sprint("Waiting for CloudFormation stack", a.stackName, "to be deleted...")) // TODO: verbose only return cloudformation.NewStackDeleteCompleteWaiter(cfn, delete1s).Wait(ctx, &cloudformation.DescribeStacksInput{ StackName: ptr.String(a.stackName), }, stackTimeout) diff --git a/src/pkg/debug/debug_test.go b/src/pkg/debug/debug_test.go index ef2bb0972..9d6479da8 100644 --- a/src/pkg/debug/debug_test.go +++ b/src/pkg/debug/debug_test.go @@ -164,7 +164,7 @@ func TestDebugComposeLoadError(t *testing.T) { _, loadErr := loader.LoadProject(ctx) if loadErr != nil { - slog.Error(fmt.Sprintln("Cannot load project:", loadErr)) + slog.Error(fmt.Sprint("Cannot load project:", loadErr)) project, err := loader.CreateProjectForDebug() assert.NoError(t, err, "CreateProjectForDebug should not return an error") diff --git a/src/pkg/login/agree_tos.go b/src/pkg/login/agree_tos.go index 2a0a406c4..a452302c7 100644 --- a/src/pkg/login/agree_tos.go +++ b/src/pkg/login/agree_tos.go @@ -21,7 +21,7 @@ func InteractiveAgreeToS(ctx context.Context, fabric client.FabricClient) error if client.TermsAccepted() { // The user has already agreed to the terms of service recently if err := nonInteractiveAgreeToS(ctx, fabric); err != nil { - 
slog.Debug(fmt.Sprintln("unable to agree to terms:", err)) // not fatal + slog.Debug(fmt.Sprint("unable to agree to terms:", err)) // not fatal } return nil } @@ -52,7 +52,7 @@ func NonInteractiveAgreeToS(ctx context.Context, fabric client.FabricClient) err // Persist the terms agreement in the state file so that we don't ask again if err := client.AcceptTerms(); err != nil { - slog.Debug(fmt.Sprintln("unable to persist terms agreement:", err)) // not fatal + slog.Debug(fmt.Sprint("unable to persist terms agreement:", err)) // not fatal } return nonInteractiveAgreeToS(ctx, fabric) diff --git a/src/pkg/login/login.go b/src/pkg/login/login.go index ab7555bb1..5a9346073 100644 --- a/src/pkg/login/login.go +++ b/src/pkg/login/login.go @@ -28,7 +28,7 @@ type AuthService interface { type OpenAuthService struct{} func (OpenAuthService) login(ctx context.Context, fabricAddr string, flow LoginFlow, mcpClient string) (string, error) { - slog.Debug(fmt.Sprintln("Logging in to", fabricAddr)) + slog.Debug(fmt.Sprint("Logging in to", fabricAddr)) code, err := auth.StartAuthCodeFlow(ctx, flow, func(token string) { client.SaveAccessToken(fabricAddr, token) @@ -130,7 +130,7 @@ func InteractiveRequireLoginAndToS(ctx context.Context, fabric client.FabricClie if err = fabric.CheckLoginAndToS(ctx); err != nil { // Login interactively now; only do this for authorization-related errors if connect.CodeOf(err) == connect.CodeUnauthenticated { - slog.Debug(fmt.Sprintln("Server error:", err)) + slog.Debug(fmt.Sprint("Server error:", err)) slog.WarnContext(ctx, "Please log in to continue.") term.ResetWarnings() // clear any previous warnings so we don't show them again diff --git a/src/pkg/mcp/mcp_server.go b/src/pkg/mcp/mcp_server.go index b2f65bc0f..f0fb252ca 100644 --- a/src/pkg/mcp/mcp_server.go +++ b/src/pkg/mcp/mcp_server.go @@ -34,7 +34,7 @@ func (t *ToolTracker) TrackTool(name string, handler server.ToolHandlerFunc) ser track.Evt("MCP Tool Called", track.P("tool", name), 
track.P("client", t.client), track.P("cluster", t.fabricAddr), track.P("provider", *t.providerId)) resp, err := handler(ctx, request) if err != nil { - slog.ErrorContext(ctx, fmt.Sprintln("MCP Tool Failed: "+name, "error", err)) + slog.ErrorContext(ctx, fmt.Sprint("MCP Tool Failed: "+name, "error", err)) } else { slog.Debug("MCP Tool Succeeded: " + name) } diff --git a/src/pkg/mcp/resources/resources.go b/src/pkg/mcp/resources/resources.go index a0c66b74b..a2b9e0f55 100644 --- a/src/pkg/mcp/resources/resources.go +++ b/src/pkg/mcp/resources/resources.go @@ -38,7 +38,7 @@ func setupDocumentationResource(s *server.MCPServer) { // Read the file file, err := os.ReadFile(knowledgeBasePath) if err != nil { - slog.ErrorContext(ctx, fmt.Sprintln("Failed to read resource file", "error", err, "path", "knowledge_base.json")) + slog.ErrorContext(ctx, fmt.Sprint("Failed to read resource file", "error", err, "path", "knowledge_base.json")) return nil, fmt.Errorf("failed to read resource file knowledge_base.json: %w", err) } @@ -68,7 +68,7 @@ func setupSamplesResource(s *server.MCPServer) { // Read the file file, err := os.ReadFile(samplesExamplesPath) if err != nil { - slog.ErrorContext(ctx, fmt.Sprintln("Failed to read resource file", "error", err, "path", "samples_examples.json")) + slog.ErrorContext(ctx, fmt.Sprint("Failed to read resource file", "error", err, "path", "samples_examples.json")) return nil, fmt.Errorf("failed to read resource file samples_examples.json: %w", err) } diff --git a/src/pkg/mcp/utils.go b/src/pkg/mcp/utils.go index 6cf6c87ff..42f249bb2 100644 --- a/src/pkg/mcp/utils.go +++ b/src/pkg/mcp/utils.go @@ -26,7 +26,7 @@ func SetupKnowledgeBase() error { // Create knowledge base directory if it doesn't exist slog.Debug("Creating knowledge base directory: " + KnowledgeBaseDir) if err := os.MkdirAll(KnowledgeBaseDir, 0700); err != nil { - slog.Error(fmt.Sprintln("Failed to create knowledge base directory", "error", err)) + slog.Error(fmt.Sprint("Failed to 
create knowledge base directory", "error", err)) return err } @@ -34,7 +34,7 @@ func SetupKnowledgeBase() error { slog.Debug("Downloading knowledge base file: " + filename) err := downloadKnowledgeBase(KnowledgeBaseDir+"/"+filename, "/"+DocumentationEndpoint+"/"+filename) if err != nil { - slog.Error(fmt.Sprintln("Failed to download knowledge base file", "error", err, "filename", filename)) + slog.Error(fmt.Sprint("Failed to download knowledge base file", "error", err, "filename", filename)) return err } } @@ -48,7 +48,7 @@ func downloadKnowledgeBase(filepath string, path string) (err error) { out, err := os.Create(filepath) slog.Debug("Creating file: " + filepath) if err != nil { - slog.Error(fmt.Sprintln("Failed to create file", "error", err, "filepath", filepath)) + slog.Error(fmt.Sprint("Failed to create file", "error", err, "filepath", filepath)) return err } defer out.Close() @@ -57,7 +57,7 @@ func downloadKnowledgeBase(filepath string, path string) (err error) { resp, err := http.Get(AskDefangBaseURL + path) slog.Debug("Downloading file: " + path) if err != nil { - slog.Error(fmt.Sprintln("Failed to download file", "error", err, "url", path)) + slog.Error(fmt.Sprint("Failed to download file", "error", err, "url", path)) return err } defer resp.Body.Close() @@ -65,7 +65,7 @@ func downloadKnowledgeBase(filepath string, path string) (err error) { // Check server response slog.Debug("Checking server response: " + resp.Status) if resp.StatusCode != http.StatusOK { - slog.Error(fmt.Sprintln("Failed to download file", "error", fmt.Errorf("bad status: %s", resp.Status), "url", path)) + slog.Error(fmt.Sprint("Failed to download file", "error", fmt.Errorf("bad status: %s", resp.Status), "url", path)) return fmt.Errorf("bad status: %s", resp.Status) } @@ -73,7 +73,7 @@ func downloadKnowledgeBase(filepath string, path string) (err error) { slog.Debug("Copying Using IO Copy: " + filepath) _, err = io.Copy(out, resp.Body) if err != nil { - slog.Error(fmt.Sprintln("Failed 
to write file", "error", err, "filepath", filepath)) + slog.Error(fmt.Sprint("Failed to write file", "error", err, "filepath", filepath)) return err } diff --git a/src/pkg/setup/setup.go b/src/pkg/setup/setup.go index d0264c4c1..1629d085f 100644 --- a/src/pkg/setup/setup.go +++ b/src/pkg/setup/setup.go @@ -244,7 +244,7 @@ func (s *SetupClient) MigrateFromHeroku(ctx context.Context) (SetupResult, error return SetupResult{}, fmt.Errorf("failed to write compose file: %w", err) } - slog.InfoContext(ctx, fmt.Sprintln("Compose file written to", composeFilePath)) + slog.InfoContext(ctx, fmt.Sprint("Compose file written to", composeFilePath)) slog.InfoContext(ctx, "Your application is now ready to deploy with Defang.") slog.InfoContext(ctx, "For next steps, visit https://s.defang.io/from-heroku") diff --git a/src/pkg/tokenstore/store.go b/src/pkg/tokenstore/store.go index fdcc639d8..e47383670 100644 --- a/src/pkg/tokenstore/store.go +++ b/src/pkg/tokenstore/store.go @@ -32,7 +32,7 @@ func (s *LocalDirTokenStore) Save(key string, token string) error { return err } - slog.Debug(fmt.Sprintln("Saving access token to", tokenFile)) + slog.Debug(fmt.Sprint("Saving access token to", tokenFile)) dir, _ := filepath.Split(tokenFile) if err := os.MkdirAll(dir, 0700); err != nil { return fmt.Errorf("failed to create token directory: %w", err) @@ -50,7 +50,7 @@ func (s *LocalDirTokenStore) Load(key string) (string, error) { if err != nil { return "", err } - slog.Debug(fmt.Sprintln("Reading access token from file", tokenFile)) + slog.Debug(fmt.Sprint("Reading access token from file", tokenFile)) all, err := os.ReadFile(tokenFile) if err != nil { return "", fmt.Errorf("failed to read token: %w", err) From 68a26c2824aea58c0c357f6a501060338124405b Mon Sep 17 00:00:00 2001 From: jordanstephens Date: Wed, 22 Apr 2026 11:34:59 -0700 Subject: [PATCH 7/7] fix: replace slog.Debug(fmt.Sprintf(...)) with structured logging --- src/cmd/cli/command/compose.go | 8 ++-- src/cmd/cli/command/estimate.go 
| 2 +- src/cmd/cli/command/generate.go | 4 +- src/cmd/cli/command/globals.go | 7 ++- src/cmd/cli/command/mcp.go | 6 +-- src/cmd/cli/command/session.go | 6 +-- src/cmd/cli/command/stack.go | 2 +- src/pkg/agent/common/common.go | 2 +- src/pkg/agent/generator.go | 3 +- src/pkg/agent/toolmanager.go | 2 +- src/pkg/agent/tools/deploy.go | 2 +- src/pkg/agent/tools/estimate.go | 2 +- src/pkg/agent/tools/services.go | 2 +- src/pkg/auth/auth.go | 4 +- src/pkg/cli/cd.go | 2 +- src/pkg/cli/cert.go | 22 ++++----- src/pkg/cli/client/byoc/aws/byoc.go | 20 ++++----- src/pkg/cli/client/byoc/aws/domain.go | 10 ++--- src/pkg/cli/client/byoc/aws/list.go | 4 +- src/pkg/cli/client/byoc/aws/stream.go | 3 +- src/pkg/cli/client/byoc/aws/subscribe.go | 3 +- src/pkg/cli/client/byoc/do/byoc.go | 2 +- src/pkg/cli/client/byoc/gcp/byoc.go | 20 ++++----- src/pkg/cli/client/byoc/gcp/stream.go | 8 ++-- src/pkg/cli/client/caniuse.go | 10 ++--- src/pkg/cli/client/cluster.go | 3 +- src/pkg/cli/compose/baseimage.go | 2 +- src/pkg/cli/compose/context.go | 2 +- src/pkg/cli/compose/dockerfile_validator.go | 4 +- src/pkg/cli/compose/fixup.go | 18 ++++---- src/pkg/cli/compose/loader.go | 4 +- src/pkg/cli/compose/serviceNameReplacer.go | 6 +-- src/pkg/cli/compose/validation.go | 50 ++++++++++----------- src/pkg/cli/composeDown.go | 3 +- src/pkg/cli/configDelete.go | 3 +- src/pkg/cli/configList.go | 3 +- src/pkg/cli/configSet.go | 2 +- src/pkg/cli/connect.go | 4 +- src/pkg/cli/estimate.go | 2 +- src/pkg/cli/getServices.go | 10 ++--- src/pkg/cli/new.go | 4 +- src/pkg/cli/subscribe.go | 8 ++-- src/pkg/cli/tail.go | 2 +- src/pkg/cli/token.go | 2 +- src/pkg/cli/upgrade.go | 5 +-- src/pkg/cli/waitForCdTaskExit.go | 2 +- src/pkg/clouds/aws/login.go | 24 +++++----- src/pkg/clouds/do/appPlatform/setup.go | 4 +- src/pkg/clouds/gcp/api.go | 6 +-- src/pkg/clouds/gcp/iam.go | 14 +++--- src/pkg/clouds/gcp/login.go | 14 +++--- src/pkg/clouds/gcp/storage.go | 2 +- src/pkg/github/version.go | 4 +- src/pkg/mcp/mcp_server.go | 
2 +- src/pkg/mcp/utils.go | 2 +- src/pkg/migrate/heroku.go | 18 ++++---- src/pkg/migrate/migrate.go | 14 +++--- src/pkg/track/track.go | 5 +-- 58 files changed, 197 insertions(+), 207 deletions(-) diff --git a/src/cmd/cli/command/compose.go b/src/cmd/cli/command/compose.go index be9ad79bb..1c177c053 100644 --- a/src/cmd/cli/command/compose.go +++ b/src/cmd/cli/command/compose.go @@ -95,9 +95,9 @@ func makeComposeUpCmd() *cobra.Command { Type: defangv1.DeploymentType_DEPLOYMENT_TYPE_ACTIVE, Stack: session.Stack.Name, }); err != nil { - slog.Debug(fmt.Sprintf("ListDeployments failed: %v", err)) + slog.Debug("ListDeployments failed", "err", err) } else if accountInfo, err := session.Provider.AccountInfo(ctx); err != nil { - slog.Debug(fmt.Sprintf("AccountInfo failed: %v", err)) + slog.Debug("AccountInfo failed", "err", err) } else if len(resp.Deployments) > 0 { workingDir, _ := session.Loader.ProjectWorkingDir(ctx) confirmed, err := confirmDeployment(workingDir, resp.Deployments, accountInfo, session.Provider.GetStackName()) @@ -247,7 +247,7 @@ func confirmDeployment(targetDirectory string, existingDeployments []*defangv1.D Mode: global.Stack.Mode, }) if err != nil { - slog.Debug(fmt.Sprintf("Failed to create stack %v", err)) + slog.Debug("Failed to create stack", "err", err) } else { stacks.PrintCreateMessage(stackName) } @@ -452,7 +452,7 @@ func makeComposeDownCmd() *cobra.Command { slog.Warn("Stored project configs are not deleted.") } } else { - slog.Debug(fmt.Sprintf("ListConfigs failed: %v", err)) + slog.Debug("ListConfigs failed", "err", err) } if detach { diff --git a/src/cmd/cli/command/estimate.go b/src/cmd/cli/command/estimate.go index 53f0e0d01..192532d05 100644 --- a/src/cmd/cli/command/estimate.go +++ b/src/cmd/cli/command/estimate.go @@ -55,7 +55,7 @@ func makeEstimateCmd() *cobra.Command { if err != nil { return fmt.Errorf("failed to run estimate: %w", err) } - slog.Debug(fmt.Sprintf("Estimate: %+v", estimate)) + slog.Debug("Estimate", "estimate", 
estimate) cli.PrintEstimate(global.Stack.Mode, estimate, term.DefaultTerm) diff --git a/src/cmd/cli/command/generate.go b/src/cmd/cli/command/generate.go index e9f77e97d..7579741a1 100644 --- a/src/cmd/cli/command/generate.go +++ b/src/cmd/cli/command/generate.go @@ -57,7 +57,7 @@ func afterGenerate(ctx context.Context, result setup.SetupResult) { cmdd := exec.Command(editor, result.Folder) err := cmdd.Start() if err != nil { - slog.Debug(fmt.Sprintf("unable to launch editor %q: %v", editor, err)) + slog.Debug("unable to launch editor", "editor", editor, "err", err) } cd := "" @@ -69,7 +69,7 @@ func afterGenerate(ctx context.Context, result setup.SetupResult) { loader := compose.NewLoader(compose.WithPath(filepath.Join(result.Folder, "compose.yaml"))) project, err := loader.LoadProject(ctx) if err != nil { - slog.Debug(fmt.Sprintf("unable to load new project: %v", err)) + slog.Debug("unable to load new project", "err", err) } var envInstructions []string diff --git a/src/cmd/cli/command/globals.go b/src/cmd/cli/command/globals.go index 6d6b3353e..f30b54b90 100644 --- a/src/cmd/cli/command/globals.go +++ b/src/cmd/cli/command/globals.go @@ -1,7 +1,6 @@ package command import ( - "fmt" "log/slog" "os" "strconv" @@ -97,7 +96,7 @@ func NewGlobalConfig() *GlobalConfig { if fromEnv, ok := os.LookupEnv("DEFANG_COLOR"); ok { err := color.Set(fromEnv) if err != nil { - slog.Debug(fmt.Sprintf("invalid DEFANG_COLOR value: %v", err)) + slog.Debug("invalid DEFANG_COLOR value", "err", err) } } @@ -105,7 +104,7 @@ func NewGlobalConfig() *GlobalConfig { if fromEnv, ok := os.LookupEnv("DEFANG_PROVIDER"); ok { err := provider.Set(fromEnv) if err != nil { - slog.Debug(fmt.Sprintf("invalid DEFANG_PROVIDER value: %v", err)) + slog.Debug("invalid DEFANG_PROVIDER value", "err", err) } } @@ -113,7 +112,7 @@ func NewGlobalConfig() *GlobalConfig { if fromEnv, ok := os.LookupEnv("DEFANG_MODE"); ok { err := mode.Set(fromEnv) if err != nil { - slog.Debug(fmt.Sprintf("invalid DEFANG_MODE value: 
%v", err)) + slog.Debug("invalid DEFANG_MODE value", "err", err) } } diff --git a/src/cmd/cli/command/mcp.go b/src/cmd/cli/command/mcp.go index ca5446093..4b982151a 100644 --- a/src/cmd/cli/command/mcp.go +++ b/src/cmd/cli/command/mcp.go @@ -90,18 +90,18 @@ var mcpSetupCmd = &cobra.Command{ client = string(mcp.MCPClientWindsurf) } - slog.Debug(fmt.Sprintf("Using MCP client flag: %q", client)) + slog.Debug("Using MCP client flag", "client", client) if err := mcp.SetupClient(client); err != nil { return err } } else { - slog.Debug(fmt.Sprintf("Using MCP client picker: %q", client)) + slog.Debug("Using MCP client picker", "client", client) clients, err := mcp.SelectMCPclients() if err != nil { return err } for _, client := range clients { - slog.Debug(fmt.Sprintf("Selected MCP client using picker: %q", client)) + slog.Debug("Selected MCP client using picker", "client", client) if err := mcp.SetupClient(client); err != nil { return err diff --git a/src/cmd/cli/command/session.go b/src/cmd/cli/command/session.go index 2ce2caebe..5aef33ef1 100644 --- a/src/cmd/cli/command/session.go +++ b/src/cmd/cli/command/session.go @@ -42,7 +42,7 @@ func newCommandSessionWithOpts(cmd *cobra.Command, opts commandSessionOpts) (*se if !errors.Is(err, types.ErrComposeFileNotFound) { return nil, err } - slog.Debug(fmt.Sprintf("Could not create stack manager: %v", err)) + slog.Debug("Could not create stack manager", "err", err) } sessionLoader := session.NewSessionLoader(global.Client, sm, options) session, err := sessionLoader.LoadSession(ctx) @@ -114,7 +114,7 @@ func newStackManagerForLoader(ctx context.Context, loader *compose.Loader) (sess if !errors.Is(err, types.ErrComposeFileNotFound) { return nil, handleInvalidComposeFileErr(ctx, err) } - slog.Debug(fmt.Sprintf("Could not determine project working directory: %v", err)) + slog.Debug("Could not determine project working directory", "err", err) // No project directory; look for .defang directory in current or parent directories 
targetDirectory, _ = findTargetDirectory(".") } else { @@ -125,7 +125,7 @@ func newStackManagerForLoader(ctx context.Context, loader *compose.Loader) (sess } projectName, _, err := loader.LoadProjectName(ctx) if err != nil { - slog.Debug(fmt.Sprintf("Could not determine project name: %v", err)) + slog.Debug("Could not determine project name", "err", err) } sm, err := stacks.NewManager(global.Client, targetDirectory, projectName, ec) if err != nil { diff --git a/src/cmd/cli/command/stack.go b/src/cmd/cli/command/stack.go index e665d17cf..54595626c 100644 --- a/src/cmd/cli/command/stack.go +++ b/src/cmd/cli/command/stack.go @@ -93,7 +93,7 @@ func makeStackNewCmd() *cobra.Command { return fmt.Errorf("stack with name %q already exists in project %q", params.Name, projectName) } - slog.Debug(fmt.Sprintf("Creating stack with parameters: %+v\n", params)) + slog.Debug("Creating stack with parameters", "params", params) _, err = stacks.CreateInDirectory(".", params) if err != nil { diff --git a/src/pkg/agent/common/common.go b/src/pkg/agent/common/common.go index ce7dcba90..730cb00ce 100644 --- a/src/pkg/agent/common/common.go +++ b/src/pkg/agent/common/common.go @@ -48,7 +48,7 @@ func ConfigureAgentLoader(params LoaderParams) (*compose.Loader, error) { } composeFilePaths := params.ComposeFilePaths if len(composeFilePaths) > 0 { - slog.Debug(fmt.Sprintf("Compose file paths provided: %v", composeFilePaths)) + slog.Debug("Compose file paths provided", "paths", composeFilePaths) slog.Debug("Function invoked: compose.NewLoader") return compose.NewLoader(compose.WithPath(composeFilePaths...)), nil } diff --git a/src/pkg/agent/generator.go b/src/pkg/agent/generator.go index 9e1418304..c94f122f4 100644 --- a/src/pkg/agent/generator.go +++ b/src/pkg/agent/generator.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "errors" - "fmt" "log/slog" "github.com/firebase/genkit/go/ai" @@ -74,7 +73,7 @@ func (g *Generator) HandleMessage(ctx context.Context, prompt string, maxTurns i if 
errors.Is(err, context.Canceled) { return err } - slog.Debug(fmt.Sprintf("error: %v", err)) + slog.Debug("generate error", "err", err) continue } diff --git a/src/pkg/agent/toolmanager.go b/src/pkg/agent/toolmanager.go index eadca843f..fd8306fa4 100644 --- a/src/pkg/agent/toolmanager.go +++ b/src/pkg/agent/toolmanager.go @@ -124,7 +124,7 @@ func (t *ToolManager) EqualPrevious(toolRequests []*ai.ToolRequest) bool { for _, req := range toolRequests { inputs, err := json.Marshal(req.Input) if err != nil { - slog.Debug(fmt.Sprintf("error marshaling tool request input: %v", err)) + slog.Debug("error marshaling tool request input", "err", err) continue } currJSON := fmt.Sprintf("%s:%s", req.Name, inputs) diff --git a/src/pkg/agent/tools/deploy.go b/src/pkg/agent/tools/deploy.go index 6b6176413..806a1995e 100644 --- a/src/pkg/agent/tools/deploy.go +++ b/src/pkg/agent/tools/deploy.go @@ -57,7 +57,7 @@ func HandleDeployTool(ctx context.Context, loader client.Loader, params DeployPa } // Deploy the services - slog.Debug(fmt.Sprintf("Deploying services for project %s...", project.Name)) + slog.Debug("Deploying services for project", "project", project.Name) slog.Debug("Function invoked: cli.ComposeUp") // Use ComposeUp to deploy the services diff --git a/src/pkg/agent/tools/estimate.go b/src/pkg/agent/tools/estimate.go index ba5d48174..464354776 100644 --- a/src/pkg/agent/tools/estimate.go +++ b/src/pkg/agent/tools/estimate.go @@ -56,7 +56,7 @@ func HandleEstimateTool(ctx context.Context, loader client.Loader, params Estima if err != nil { return "", fmt.Errorf("failed to run estimate: %w", err) } - slog.Debug(fmt.Sprintf("Estimate: %+v", estimate)) + slog.Debug("Estimate", "estimate", estimate) estimateText := cli.PrintEstimate(deploymentMode, estimate) diff --git a/src/pkg/agent/tools/services.go b/src/pkg/agent/tools/services.go index d89d9c2d0..89bb956d9 100644 --- a/src/pkg/agent/tools/services.go +++ b/src/pkg/agent/tools/services.go @@ -68,7 +68,7 @@ func 
HandleServicesTool(ctx context.Context, loader client.Loader, params Servic // Convert to JSON jsonData, jsonErr := json.Marshal(serviceResponse) if jsonErr == nil { - slog.Debug(fmt.Sprintf("Successfully loaded services with count: %d", len(serviceResponse))) + slog.Debug("Successfully loaded services", "count", len(serviceResponse)) return string(jsonData) + "\nIf you would like to see more details about your deployed projects, please visit the Defang portal at https://portal.defang.io/projects", nil } diff --git a/src/pkg/auth/auth.go b/src/pkg/auth/auth.go index 8cbf5364b..bd6424189 100644 --- a/src/pkg/auth/auth.go +++ b/src/pkg/auth/auth.go @@ -117,7 +117,7 @@ func Poll(ctx context.Context, key string) ([]byte, error) { } var unexpectedError ErrUnexpectedStatus if errors.As(err, &unexpectedError) && unexpectedError.StatusCode >= 500 { - slog.Debug(fmt.Sprintf("received server error: %s, retrying in %v...", unexpectedError.Status, retryDelay)) + slog.Debug("received server error, retrying", "status", unexpectedError.Status, "retryDelay", retryDelay) select { case <-ctx.Done(): return nil, ctx.Err() @@ -162,7 +162,7 @@ func ExchangeCodeForToken(ctx context.Context, code AuthCodeFlow, ss ...scope.Sc scopes = append(scopes, s.String()) } - slog.Debug(fmt.Sprintf("Generating access token with scopes %v", scopes)) + slog.Debug("Generating access token", "scopes", scopes) token, err := OpenAuthClient.Exchange(code.code, code.redirectUri, code.verifier) // TODO: scope if err != nil { diff --git a/src/pkg/cli/cd.go b/src/pkg/cli/cd.go index 2f6b46078..e260fef0b 100644 --- a/src/pkg/cli/cd.go +++ b/src/pkg/cli/cd.go @@ -66,7 +66,7 @@ func CdCommand(ctx context.Context, projectName string, provider client.Provider StatesUrl: statesUrl, }) if err != nil { - slog.Debug(fmt.Sprintf("Failed to record deployment: %v", err)) + slog.Debug("Failed to record deployment", "err", err) slog.WarnContext(ctx, "Unable to update deployment history; deployment will proceed anyway.") } } 
diff --git a/src/pkg/cli/cert.go b/src/pkg/cli/cert.go index 65a79988f..93f5c5787 100644 --- a/src/pkg/cli/cert.go +++ b/src/pkg/cli/cert.go @@ -70,7 +70,7 @@ var ( ExpectContinueTimeout: 1 * time.Second, }, CheckRedirect: func(req *http.Request, via []*http.Request) error { - slog.Debug(fmt.Sprintf("Redirecting from %v to %v", via[len(via)-1].URL, req.URL)) + slog.Debug("Redirecting", "from", via[len(via)-1].URL, "to", req.URL) return nil }, } @@ -78,7 +78,7 @@ var ( ) func GenerateLetsEncryptCert(ctx context.Context, project *compose.Project, client client.FabricClient, provider client.Provider) error { - slog.Debug(fmt.Sprintf("Generating TLS cert for project %q", project.Name)) + slog.Debug("Generating TLS cert for project", "project", project.Name) services, err := provider.GetServices(ctx, &defangv1.GetServicesRequest{Project: project.Name}) if err != nil { @@ -104,7 +104,7 @@ func GenerateLetsEncryptCert(ctx context.Context, project *compose.Project, clie if defaultNetwork := service.Networks["default"]; defaultNetwork != nil { domains = append(domains, defaultNetwork.Aliases...) 
} - slog.Debug(fmt.Sprintf("Found service %v with domains %v and targets %v", service.Name, domains, targets)) + slog.Debug("Found service with domains and targets", "service", service.Name, "domains", domains, "targets", targets) for _, domain := range domains { generateCert(ctx, domain, targets, client) } @@ -177,7 +177,7 @@ func triggerCertGeneration(ctx context.Context, domain string) error { // Our own retry logic uses the root resolver to prevent cached DNS and retry on all non-200 errors if err := getWithRetries(ctx, fmt.Sprintf("http://%v", domain), 5); err != nil { // Retry incase of DNS error // Ignore possible tls error as cert attachment may take time - slog.Debug(fmt.Sprintf("Error triggering cert generation: %v", err)) + slog.Debug("Error triggering cert generation", "err", err) return err } return nil @@ -206,7 +206,7 @@ func waitForTLS(ctx context.Context, domain string) error { if err := cert.CheckTLSCert(timeout, domain); err == nil { return nil } else { - slog.Debug(fmt.Sprintf("Error checking TLS cert for %v: %v", domain, err)) + slog.Debug("Error checking TLS cert", "domain", domain, "err", err) } } } @@ -235,13 +235,13 @@ func waitForCNAME(ctx context.Context, domain string, targets []string, client c verifyDNS := func() error { if !serverSideVerified && serverVerifyRpcFailure < 3 { if err := client.VerifyDNSSetup(ctx, &defangv1.VerifyDNSSetupRequest{Domain: domain, Targets: targets}); err == nil { - slog.Debug(fmt.Sprintf("Server side DNS verification for %v successful", domain)) + slog.Debug("Server side DNS verification successful", "domain", domain) serverSideVerified = true } else { if cerr := new(connect.Error); errors.As(err, &cerr) && cerr.Code() == connect.CodeFailedPrecondition { - slog.Debug(fmt.Sprintf("Server side DNS verification negative result: %v", cerr.Message())) + slog.Debug("Server side DNS verification negative result", "message", cerr.Message()) } else { - slog.Debug(fmt.Sprintf("Server side DNS verification request for 
%v failed: %v", domain, err)) + slog.Debug("Server side DNS verification request failed", "domain", domain, "err", err) serverVerifyRpcFailure++ } } @@ -296,18 +296,18 @@ func getWithRetries(ctx context.Context, url string, tries int) error { return nil } if resp != nil && resp.Request != nil && resp.Request.URL.Scheme == "https" { - slog.Debug(fmt.Sprintf("cert gen request success, received redirect to %v", resp.Request.URL)) + slog.Debug("cert gen request success, received redirect", "url", resp.Request.URL) return nil // redirect to https indicate a successful cert generation } if err == nil { err = fmt.Errorf("HTTP: %v", resp.StatusCode) } } else if cve := new(tls.CertificateVerificationError); errors.As(err, &cve) { - slog.Debug(fmt.Sprintf("cert gen request success, received tls error: %v", cve)) + slog.Debug("cert gen request success, received tls error", "err", cve) return nil // tls error indicate a successful cert gen trigger, as it has to be redirected to https } - slog.Debug(fmt.Sprintf("Error fetching %v: %v, tries left %v", url, err, tries-i-1)) + slog.Debug("Error fetching url", "url", url, "err", err, "triesLeft", tries-i-1) errs = append(errs, err) delay := httpRetryDelayBase << i // Simple exponential backoff diff --git a/src/pkg/cli/client/byoc/aws/byoc.go b/src/pkg/cli/client/byoc/aws/byoc.go index 11d7e5603..aee131df8 100644 --- a/src/pkg/cli/client/byoc/aws/byoc.go +++ b/src/pkg/cli/client/byoc/aws/byoc.go @@ -155,7 +155,7 @@ func (b *ByocAws) SetUpCD(ctx context.Context, force bool) error { return nil } - slog.Debug(fmt.Sprintf("Using CD image: %q", b.CDImage)) + slog.Debug("Using CD image", "image", b.CDImage) _, err := b.driver.SetUp(ctx, force) if err != nil { @@ -270,10 +270,10 @@ func (b *ByocAws) deploy(ctx context.Context, req *client.DeployRequest, cmd str slog.Debug("Docker Hub credentials are needed for image pulls") dockerHubUser, dockerHubPass, err := dockerhub.GetDockerHubCredentials(ctx) if err != nil { -
slog.Debug(fmt.Sprintf("Could not retrieve Docker Hub credentials: %v", err)) + slog.Debug("Could not retrieve Docker Hub credentials", "err", err) slog.WarnContext(ctx, "Docker Hub credentials are required to avoid pull throttling. Please run `docker login` or set the DOCKERHUB_USERNAME and DOCKERHUB_TOKEN environment variables. Without valid credentials, image pulls may be rate-limited or fail.") } else { - slog.Debug(fmt.Sprintf("Using Docker Hub credentials with user %v", dockerHubUser)) + slog.Debug("Using Docker Hub credentials", "user", dockerHubUser) cdCmd.dockerHubUsername = dockerHubUser cdCmd.dockerHubAccessToken = dockerHubPass } @@ -365,7 +365,7 @@ func (b *ByocAws) checkRequiresDockerHubToken(ctx context.Context, project *comp found, err := b.driver.CheckImageExistOnPublicECR(ctx, ecrRepo, tag) if err != nil { - slog.Debug(fmt.Sprintf("Error checking image %q on Public ECR: %v, assuming credentials needed", image, err)) + slog.Debug("Error checking image on Public ECR, assuming credentials needed", "image", image, "err", err) found = false } if !found { @@ -379,7 +379,7 @@ func (b *ByocAws) checkRequiresDockerHubToken(ctx context.Context, project *comp } if len(missingDockerhubImages) > 0 { b.needDockerHubCreds = true - slog.Debug(fmt.Sprintf("Docker Hub images not found on Public ECR: %v", missingDockerhubImages)) + slog.Debug("Docker Hub images not found on Public ECR", "images", missingDockerhubImages) track.Evt("NeedsDockerHubCreds", track.P("images", strings.Join(missingDockerhubImages, ","))) } return nil @@ -596,7 +596,7 @@ func (b *ByocAws) GetProjectUpdate(ctx context.Context, projectName string) (*de // FillOutputs might fail if the stack is not created yet; return ErrNotExist (no bucket = no services yet) var cfnErr *cfn.ErrStackNotFoundException if errors.As(err, &cfnErr) { - slog.Debug(fmt.Sprintf("FillOutputs: %v", err)) + slog.Debug("FillOutputs", "err", err) return nil, client.ErrNotExist // no bucket = no services yet } return nil, 
AnnotateAwsError(err) @@ -659,14 +659,14 @@ func (b *ByocAws) getSecretID(projectName, name string) string { func (b *ByocAws) PutConfig(ctx context.Context, secret *defangv1.PutConfigRequest) error { fqn := b.getSecretID(secret.Project, secret.Name) - slog.Debug(fmt.Sprintf("Putting parameter %q", fqn)) + slog.Debug("Putting parameter", "fqn", fqn) err := b.driver.PutSecret(ctx, fqn, secret.Value) return AnnotateAwsError(err) } func (b *ByocAws) ListConfig(ctx context.Context, req *defangv1.ListConfigsRequest) (*defangv1.Secrets, error) { prefix := b.getSecretID(req.Project, "") - slog.Debug(fmt.Sprintf("Listing parameters with prefix %q", prefix)) + slog.Debug("Listing parameters with prefix", "prefix", prefix) awsSecrets, err := b.driver.ListSecretsByPrefix(ctx, prefix) if err != nil { return nil, err @@ -740,7 +740,7 @@ func (b *ByocAws) QueryLogs(ctx context.Context, req *defangv1.TailRequest) (ite // Ignore ResourceNotFoundException errors which can only happen if a log stream is missing during Query var resourceNotFound *cwTypes.ResourceNotFoundException if errors.As(err, &resourceNotFound) { - slog.Debug(fmt.Sprintf("Log stream not found while tailing, skipping: %v", err)) + slog.Debug("Log stream not found while tailing, skipping", "err", err) continue } if !yield(nil, AnnotateAwsError(err)) { @@ -818,7 +818,7 @@ func (b *ByocAws) queryOrTailLogs(ctx context.Context, cwClient cw.LogsClient, r if len(req.Services) == 0 { albIter, err := b.fetchAndStreamAlbLogs(ctx, req.Project, start, end, req.Pattern) if err != nil { - slog.Debug(fmt.Sprintf("Failed to fetch ALB logs: %v", err)) + slog.Debug("Failed to fetch ALB logs", "err", err) } else { logSeq = cw.MergeLogEvents(logSeq, albIter) if req.Limit > 0 { diff --git a/src/pkg/cli/client/byoc/aws/domain.go b/src/pkg/cli/client/byoc/aws/domain.go index 9649a565e..d2d076436 100644 --- a/src/pkg/cli/client/byoc/aws/domain.go +++ b/src/pkg/cli/client/byoc/aws/domain.go @@ -28,7 +28,7 @@ func 
prepareDomainDelegation(ctx context.Context, projectDomain, projectName, st if !errors.Is(err, aws.ErrZoneNotFound) { return nil, "", err // TODO: we should not fail deployment if GetHostedZonesByName fails } - slog.Debug(fmt.Sprintf("Zone %q not found, delegation set will be created", projectDomain)) + slog.Debug("Zone not found, delegation set will be created", "domain", projectDomain) } else { // Case 2: Get the NS records for the existing subdomain zone delegationSet, err = getOrCreateDelegationSetByZones(ctx, zones, projectName, stackName, r53Client) @@ -88,7 +88,7 @@ func findUsableDelegationSet(ctx context.Context, domain string, r53Client aws.R if len(hostedZones) >= 100 { // A delegation set can only be associated with up to 100 hosted zones by default // (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html#limits-api-entities-hosted-zones) - slog.Debug(fmt.Sprintf("Delegation set %q has reached the maximum number of hosted zones (100), skipping", *delegationSet.Id)) + slog.Debug("Delegation set has reached the maximum number of hosted zones (100), skipping", "delegationSetId", *delegationSet.Id) continue } return &delegationSet, nil @@ -120,7 +120,7 @@ func createUsableDelegationSet(ctx context.Context, domain string, r53Client aws // up to 100 delegation sets can be created per account, failure is non-fatal // there is no direct actionable remedy for the user too. 
// TODO: find and reuse empty delegation sets to avoid hitting the limit - slog.Debug(fmt.Sprintf("Failed to delete conflicting delegation set %q: %v", *delegationSet.Id, err)) + slog.Debug("Failed to delete conflicting delegation set", "delegationSetId", *delegationSet.Id, "err", err) } } else { return delegationSet, nil @@ -138,7 +138,7 @@ func nameServersHasConflict(ctx context.Context, nameServers []string, domains [ return false, err } else if len(records) > 0 { // Records found, meaning the NS server is conflicting - slog.Debug(fmt.Sprintf("Name server %q has conflicting records for domain %q: %v", nsServer, domain, records)) + slog.Debug("Name server has conflicting records for domain", "nsServer", nsServer, "domain", domain, "records", records) return true, nil } } @@ -156,7 +156,7 @@ func getOrCreateDelegationSetByZones(ctx context.Context, zones []*types.HostedZ } // Ignore zones that were created by an older CLI (2a), or another way (2c) or belong to a different project/stack (2d) if tags["defang:project"] != projectName || tags["defang:stack"] != stackName { - slog.Debug(fmt.Sprintf("ignored zone %q as it belongs to a different project/stack (%q/%q), skipping", projectDomain, tags["defang:project"], tags["defang:stack"])) + slog.Debug("ignored zone as it belongs to a different project/stack, skipping", "domain", projectDomain, "project", tags["defang:project"], "stack", tags["defang:stack"]) continue } diff --git a/src/pkg/cli/client/byoc/aws/list.go b/src/pkg/cli/client/byoc/aws/list.go index 80b36c605..b346399d9 100644 --- a/src/pkg/cli/client/byoc/aws/list.go +++ b/src/pkg/cli/client/byoc/aws/list.go @@ -86,7 +86,7 @@ func ListPulumiStacks(ctx context.Context, s3client S3Client, bucketName string) return io.ReadAll(getObjectOutput.Body) }) if err != nil { - slog.Debug(fmt.Sprintf("Skipping %q in bucket %s: %v", *obj.Key, bucketName, AnnotateAwsError(err))) + slog.Debug("Skipping object in bucket", "key", *obj.Key, "bucket", bucketName, "err", 
AnnotateAwsError(err)) continue } if state != nil { @@ -128,7 +128,7 @@ func (b *ByocAws) listPulumiStacksAllRegions(ctx context.Context, s3client S3Cli Bucket: bucket.Name, }) if err != nil { - slog.Debug(fmt.Sprintf("Skipping bucket %s: failed to get location: %v", *bucket.Name, AnnotateAwsError(err))) + slog.Debug("Skipping bucket: failed to get location", "bucket", *bucket.Name, "err", AnnotateAwsError(err)) continue } diff --git a/src/pkg/cli/client/byoc/aws/stream.go b/src/pkg/cli/client/byoc/aws/stream.go index d1fcabfbf..e78146cb5 100644 --- a/src/pkg/cli/client/byoc/aws/stream.go +++ b/src/pkg/cli/client/byoc/aws/stream.go @@ -2,7 +2,6 @@ package aws import ( "encoding/json" - "fmt" "log/slog" "regexp" "slices" @@ -124,7 +123,7 @@ func (p *logEventParser) parseEvents(events []cw.LogEvent) *defangv1.TailRespons } else if parseECSEventRecords { evt, err := ecs.ParseECSEvent([]byte(*event.Message)) if err != nil { - slog.Debug(fmt.Sprintf("error parsing ECS event, output raw event log: %v", err)) + slog.Debug("error parsing ECS event, output raw event log", "err", err) } else { entry.Service = evt.Service() entry.Etag = evt.Etag() diff --git a/src/pkg/cli/client/byoc/aws/subscribe.go b/src/pkg/cli/client/byoc/aws/subscribe.go index e61de1dde..32a1606fe 100644 --- a/src/pkg/cli/client/byoc/aws/subscribe.go +++ b/src/pkg/cli/client/byoc/aws/subscribe.go @@ -1,7 +1,6 @@ package aws import ( - "fmt" "iter" "log/slog" "slices" @@ -55,7 +54,7 @@ func parseSubscribeEvent(evt cw.LogEvent, etag types.ETag, services []string) *d func parseECSSubscribeEvent(evt cw.LogEvent, etag types.ETag, services []string) *defangv1.SubscribeResponse { ecsEvt, err := ecs.ParseECSEvent([]byte(*evt.Message)) if err != nil { - slog.Debug(fmt.Sprintf("error parsing ECS event: %v", err)) + slog.Debug("error parsing ECS event", "err", err) return nil } diff --git a/src/pkg/cli/client/byoc/do/byoc.go b/src/pkg/cli/client/byoc/do/byoc.go index fdec9250a..4bdd0b306 100644 --- 
a/src/pkg/cli/client/byoc/do/byoc.go +++ b/src/pkg/cli/client/byoc/do/byoc.go @@ -456,7 +456,7 @@ func (b *ByocDo) QueryLogs(ctx context.Context, req *defangv1.TailRequest) (iter logType := logs.LogType(req.LogType) - slog.Debug(fmt.Sprintf("Deployment phase: %s", deploymentInfo.GetPhase())) + slog.Debug("Deployment phase", "phase", deploymentInfo.GetPhase()) switch deploymentInfo.GetPhase() { case godo.DeploymentPhase_PendingBuild, godo.DeploymentPhase_PendingDeploy, godo.DeploymentPhase_Deploying: // Do nothing; check again in 10 seconds diff --git a/src/pkg/cli/client/byoc/gcp/byoc.go b/src/pkg/cli/client/byoc/gcp/byoc.go index f907ae48f..b9431f7ee 100644 --- a/src/pkg/cli/client/byoc/gcp/byoc.go +++ b/src/pkg/cli/client/byoc/gcp/byoc.go @@ -283,7 +283,7 @@ func (b *ByocGcp) SetUpCD(ctx context.Context, force bool) error { } } - slog.Debug(fmt.Sprintf("Using CD image: %q", b.CDImage)) + slog.Debug("Using CD image", "image", b.CDImage) b.SetupDone = true return nil @@ -322,12 +322,12 @@ func (b *ByocGcp) CdList(ctx context.Context, _allRegions bool) (iter.Seq[state. 
return func(yield func(state.Info) bool) { for obj, err := range seq { if err != nil { - slog.Debug(fmt.Sprintf("Error listing object in bucket %s: %v", bucketName, annotateGcpError(err))) + slog.Debug("Error listing object in bucket", "bucket", bucketName, "err", annotateGcpError(err)) continue } st, err := state.ParsePulumiStateFile(ctx, gcpObj{obj}, bucketName, objLoader) if err != nil { - slog.Debug(fmt.Sprintf("Skipping %q in bucket %s: %v", obj.Name, bucketName, annotateGcpError(err))) + slog.Debug("Skipping object in bucket", "object", obj.Name, "bucket", bucketName, "err", annotateGcpError(err)) continue } if st == nil { @@ -488,7 +488,7 @@ func (b *ByocGcp) runCdCommand(ctx context.Context, cmd cdCommand) (string, erro if err != nil { return "", err } - slog.Debug(fmt.Sprintf("Starting CD in cloudbuild at: %v", time.Now().Format(time.RFC3339))) + slog.Debug("Starting CD in cloudbuild", "at", time.Now().Format(time.RFC3339)) buildId, err := b.driver.RunCloudBuild(ctx, gcp.CloudBuildArgs{ Steps: string(steps), ServiceAccount: &b.cdServiceAccount, @@ -711,7 +711,7 @@ func (b *ByocGcp) PrepareDomainDelegation(ctx context.Context, req client.Prepar return nil, annotateGcpError(err) } else { b.delegateDomainZone = zone.Name - slog.Debug(fmt.Sprintf("Zone %s created with nameservers %v", zone.Name, zone.NameServers)) + slog.Debug("Zone created with nameservers", "zone", zone.Name, "nameservers", zone.NameServers) return &client.PrepareDomainDelegationResponse{ NameServers: zone.NameServers, }, nil @@ -721,7 +721,7 @@ func (b *ByocGcp) PrepareDomainDelegation(ctx context.Context, req client.Prepar func (b *ByocGcp) DeleteConfig(ctx context.Context, req *defangv1.Secrets) error { for _, name := range req.Names { secretId := b.resourceName(req.Project, name) - slog.Debug(fmt.Sprintf("Deleting secret %q", secretId)) + slog.Debug("Deleting secret", "secretId", secretId) if err := b.driver.DeleteSecret(ctx, secretId); err != nil { return fmt.Errorf("failed to delete 
secret %q: %w", secretId, err) } @@ -750,7 +750,7 @@ func (b *ByocGcp) ListConfig(ctx context.Context, req *defangv1.ListConfigsReque func (b *ByocGcp) PutConfig(ctx context.Context, req *defangv1.PutConfigRequest) error { secretId := b.resourceName(req.Project, req.Name) - slog.Debug(fmt.Sprintf("Creating secret %q", secretId)) + slog.Debug("Creating secret", "secretId", secretId) if _, err := b.driver.CreateSecret(ctx, secretId); err != nil { if gcp.IsAccessNotEnabled(err) { @@ -761,13 +761,13 @@ func (b *ByocGcp) PutConfig(ctx context.Context, req *defangv1.PutConfigRequest) } if err != nil { if stat, ok := status.FromError(err); ok && stat.Code() == codes.AlreadyExists { - slog.Debug(fmt.Sprintf("Secret %q already exists", secretId)) + slog.Debug("Secret already exists", "secretId", secretId) } else { return fmt.Errorf("failed to create secret %q: %w", secretId, err) } } } - slog.Debug(fmt.Sprintf("Adding a new secret version for %q", secretId)) + slog.Debug("Adding a new secret version", "secretId", secretId) if _, err := b.driver.AddSecretVersion(ctx, secretId, []byte(req.Value)); err != nil { return fmt.Errorf("failed to add secret version for %q: %w", secretId, err) } @@ -846,7 +846,7 @@ func (b *ByocGcp) GetProjectUpdate(ctx context.Context, projectName string) (*de slog.Debug(fmt.Sprint("Getting services from bucket:", bucketName, path, uploadSA)) pbBytes, err := b.driver.GetBucketObjectWithServiceAccount(ctx, bucketName, path, uploadSA) if err != nil { - slog.Debug(fmt.Sprintf("Failed to get project bucket object from bucket %q at path %q with service account %q: %v", bucketName, path, uploadSA, err)) + slog.Debug("Failed to get project bucket object", "bucket", bucketName, "path", path, "serviceAccount", uploadSA, "err", err) // Handle the case where the object does not exist, or where we do not have permission to view the object, ie. // "Permission 'iam.serviceAccounts.getAccessToken' denied on resource (or it may not exist)." 
#2051 if errors.Is(err, gcp.ErrObjectNotExist) || strings.Contains(err.Error(), "(or it may not exist)") { diff --git a/src/pkg/cli/client/byoc/gcp/stream.go b/src/pkg/cli/client/byoc/gcp/stream.go index 4407c336c..13d4715fd 100644 --- a/src/pkg/cli/client/byoc/gcp/stream.go +++ b/src/pkg/cli/client/byoc/gcp/stream.go @@ -71,7 +71,7 @@ func (s *ServerStream[T]) Follow(start time.Time) (iter.Seq2[*T, error], error) } query := s.query.GetQuery() shouldList := !start.IsZero() && start.Unix() > 0 && time.Since(start) > 10*time.Millisecond - slog.Debug(fmt.Sprintf("Query and tail logs since %v with query: \n%v", start, query)) + slog.Debug("Query and tail logs", "since", start, "query", query) return func(yield func(*T, error) bool) { defer tailer.Close() // Only query older logs if start time is more than 10ms ago @@ -126,7 +126,7 @@ func (s *ServerStream[T]) Follow(start time.Time) (iter.Seq2[*T, error], error) // Head returns an iterator that queries logs in ascending order. func (s *ServerStream[T]) Head(limit int32) iter.Seq2[*T, error] { query := s.query.GetQuery() - slog.Debug(fmt.Sprintf("Query logs with query: \n%v", query)) + slog.Debug("Query logs", "query", query) return func(yield func(*T, error) bool) { lister, err := s.gcpLogsClient.ListLogEntries(s.ctx, query, gcp.OrderAscending) if err != nil { @@ -140,7 +140,7 @@ func (s *ServerStream[T]) Head(limit int32) iter.Seq2[*T, error] { // Tail returns an iterator that queries logs in descending order, reversing if a limit is set. 
func (s *ServerStream[T]) Tail(limit int32) iter.Seq2[*T, error] { query := s.query.GetQuery() - slog.Debug(fmt.Sprintf("Query logs with query: \n%v", query)) + slog.Debug("Query logs", "query", query) return func(yield func(*T, error) bool) { lister, err := s.gcpLogsClient.ListLogEntries(s.ctx, query, gcp.OrderDescending) if err != nil { @@ -622,7 +622,7 @@ func getActivityParser(ctx context.Context, gcpLogsClient GcpLogsClient, waitFor rootTriggerId := entry.GetLabels()["compute.googleapis.com/root_trigger_id"] serviceName, ok := computeEngineRootTriggers[rootTriggerId] if !ok { - slog.Debug(fmt.Sprintf("ignored root trigger id %v for instance group insert", rootTriggerId)) + slog.Debug("ignored root trigger id for instance group insert", "rootTriggerId", rootTriggerId) return nil, nil } response := auditLog.GetResponse() diff --git a/src/pkg/cli/client/caniuse.go b/src/pkg/cli/client/caniuse.go index f1effdadc..cb9a8b37e 100644 --- a/src/pkg/cli/client/caniuse.go +++ b/src/pkg/cli/client/caniuse.go @@ -66,24 +66,24 @@ type versionLabel string // resolveVersion picks the version to use: env override > force upgrade > allow upgrade > pin to previous > latest. 
func resolveVersion(fromEnv, fromFabric, previous string, label versionLabel, allowUpgrade bool, forcedReason string) string { if fromEnv != "" { - slog.Debug(fmt.Sprintf("Using %s from env: %s", label, fromEnv)) + slog.Debug("Using version from env", "label", label, "version", fromEnv) return fromEnv } if previous == "" || fromFabric == previous { - slog.Debug(fmt.Sprintf("Using %s: %s", label, fromFabric)) + slog.Debug("Using version from fabric", "label", label, "version", fromFabric) return fromFabric } if forcedReason != "" { - slog.Debug(fmt.Sprintf("Using %s from fabric: %s", label, fromFabric)) + slog.Debug("Using version from fabric (forced)", "label", label, "version", fromFabric) slog.Warn(fmt.Sprintf("Overriding %s: %s", label, forcedReason)) return fromFabric } if allowUpgrade { - slog.Debug(fmt.Sprintf("Using latest %s: %s", label, fromFabric)) + slog.Debug("Using latest version from fabric", "label", label, "version", fromFabric) slog.Info(fmt.Sprintf("Upgrading %s to latest", label)) return fromFabric } - slog.Debug(fmt.Sprintf("Using previous %s: %s", label, previous)) + slog.Debug("Using previous version", "label", label, "version", previous) slog.Warn(fmt.Sprintf("A newer %s is available; using previously deployed version. 
To upgrade, re-run with --allow-upgrade or set DEFANG_ALLOW_UPGRADE=1", label)) return previous } diff --git a/src/pkg/cli/client/cluster.go b/src/pkg/cli/client/cluster.go index 4e64bba3d..25f01daff 100644 --- a/src/pkg/cli/client/cluster.go +++ b/src/pkg/cli/client/cluster.go @@ -1,7 +1,6 @@ package client import ( - "fmt" "log/slog" "net" "os" @@ -48,7 +47,7 @@ func GetExistingToken(fabricAddr string) string { var err error accessToken, err = TokenStore.Load(TokenStorageName(fabricAddr)) if err != nil { - slog.Debug(fmt.Sprintf("failed to load access token for %v: %v", fabricAddr, err)) + slog.Debug("failed to load access token", "fabricAddr", fabricAddr, "err", err) } // Check if we wrote an IDToken file during login, if AWS_WEB_IDENTITY_TOKEN_FILE is empty, diff --git a/src/pkg/cli/compose/baseimage.go b/src/pkg/cli/compose/baseimage.go index 858b174e3..1ddf2385c 100644 --- a/src/pkg/cli/compose/baseimage.go +++ b/src/pkg/cli/compose/baseimage.go @@ -25,7 +25,7 @@ func FindAllBaseImages(project *composeTypes.Project) ([]string, error) { images, err := extractDockerfileBaseImages(dockerfileFullPath) if err != nil { if os.IsNotExist(err) { - slog.Debug(fmt.Sprintf("service %q: dockerfile %q does not exist; skipping", service.Name, dockerfileFullPath)) + slog.Debug("service: dockerfile does not exist; skipping", "service", service.Name, "dockerfile", dockerfileFullPath) continue } return nil, err diff --git a/src/pkg/cli/compose/context.go b/src/pkg/cli/compose/context.go index 256d476c4..eb3cc290f 100644 --- a/src/pkg/cli/compose/context.go +++ b/src/pkg/cli/compose/context.go @@ -231,7 +231,7 @@ func getRemoteBuildContext(ctx context.Context, provider client.Provider, projec case UploadModeDefault, UploadModeDigest: // Calculate the digest of the tarball and pass it to the fabric controller (to avoid building the same image twice) digest = calcDigest(buffer.Bytes()) - slog.Debug(fmt.Sprintf("Digest for %q: %s", service, digest)) + slog.Debug("Digest for 
service", "service", service, "digest", digest) case UploadModePreview: // For preview, we invoke the CD "preview" command, which will want a valid (S3) URL for diff, even though it won't be used digest = calcDigest(buffer.Bytes()) diff --git a/src/pkg/cli/compose/dockerfile_validator.go b/src/pkg/cli/compose/dockerfile_validator.go index f21e4f44e..d722936ea 100644 --- a/src/pkg/cli/compose/dockerfile_validator.go +++ b/src/pkg/cli/compose/dockerfile_validator.go @@ -35,7 +35,7 @@ func (e *DockerfileValidationError) Unwrap() error { // ValidateDockerfile validates the syntax and basic structure of a Dockerfile func ValidateDockerfile(dockerfilePath string, serviceName string) error { - slog.Debug(fmt.Sprintf("Validating Dockerfile: %s for service %q", dockerfilePath, serviceName)) + slog.Debug("Validating Dockerfile for service", "dockerfile", dockerfilePath, "service", serviceName) // Read the Dockerfile content, err := os.ReadFile(dockerfilePath) @@ -161,7 +161,7 @@ func ValidateServiceDockerfiles(project *Project) error { if os.IsNotExist(err) { // This might be handled later by Railpack or may be a remote context // Only validate if the file exists - slog.Debug(fmt.Sprintf("Skipping validation for service %q: Dockerfile %q does not exist", service.Name, dockerfilePath)) + slog.Debug("Skipping validation for service: Dockerfile does not exist", "service", service.Name, "dockerfile", dockerfilePath) continue } errors = append(errors, &DockerfileValidationError{ diff --git a/src/pkg/cli/compose/fixup.go b/src/pkg/cli/compose/fixup.go index a8235e0e5..b6e4ce169 100644 --- a/src/pkg/cli/compose/fixup.go +++ b/src/pkg/cli/compose/fixup.go @@ -32,14 +32,14 @@ func FixupServices(ctx context.Context, provider client.Provider, project *compo // Preload the current config so we can detect which environment variables should be passed as "secrets" config, err := provider.ListConfig(ctx, &defangv1.ListConfigsRequest{Project: project.Name}) if err != nil { - 
slog.Debug(fmt.Sprintf("failed to load config: %v", err)) + slog.Debug("failed to load config", "err", err) config = &defangv1.Secrets{} } slices.Sort(config.Names) // sort for binary search accountInfo, err := provider.AccountInfo(ctx) if err != nil { - slog.Debug(fmt.Sprintf("failed to get account info to fixup services: %v", err)) + slog.Debug("failed to get account info to fixup services", "err", err) accountInfo = &client.AccountInfo{} } @@ -107,7 +107,7 @@ func FixupServices(ctx context.Context, provider client.Provider, project *compo // Check if the dockerfile exists dockerfilePath := filepath.Join(svccfg.Build.Context, svccfg.Build.Dockerfile) if _, err := os.Stat(dockerfilePath); err != nil { - slog.Debug(fmt.Sprintf("stat %q: %v", dockerfilePath, err)) + slog.Debug("stat dockerfile", "path", dockerfilePath, "err", err) // In this case we know that the dockerfile is not in the location the compose file specifies, // so can assume that the dockerfile has been normalized to the default "Dockerfile". 
if svccfg.Build.Dockerfile != "Dockerfile" { @@ -252,7 +252,7 @@ func fixupLLM(svccfg *composeTypes.ServiceConfig) { // HACK: we must have at least one host port to get a CNAME for the service // litellm listens on 4000 by default var port uint32 = liteLLMPort - slog.Debug(fmt.Sprintf("service %q: adding LLM host port %d", svccfg.Name, port)) + slog.Debug("adding LLM host port", "service", svccfg.Name, "port", port) svccfg.Ports = []composeTypes.ServicePortConfig{{Target: port, Mode: Mode_HOST, Protocol: Protocol_TCP}} } } @@ -273,7 +273,7 @@ func fixupPostgresService(svccfg *composeTypes.ServiceConfig, provider client.Pr return err } } - slog.Debug(fmt.Sprintf("service %q: adding postgres host port %d", svccfg.Name, port)) + slog.Debug("adding postgres host port", "service", svccfg.Name, "port", port) svccfg.Ports = []composeTypes.ServicePortConfig{{Target: port, Mode: Mode_HOST, Protocol: Protocol_TCP}} } else { fixupIngressPorts(svccfg) @@ -311,7 +311,7 @@ func fixupMongoService(svccfg *composeTypes.ServiceConfig, provider client.Provi } break // done } - slog.Debug(fmt.Sprintf("service %q: adding mongodb host port %d", svccfg.Name, port)) + slog.Debug("adding mongodb host port", "service", svccfg.Name, "port", port) svccfg.Ports = []composeTypes.ServicePortConfig{{Target: port, Mode: Mode_HOST, Protocol: Protocol_TCP}} } else { fixupIngressPorts(svccfg) @@ -339,7 +339,7 @@ func fixupRedisService(svccfg *composeTypes.ServiceConfig, provider client.Provi // continue; last one wins } } - slog.Debug(fmt.Sprintf("service %q: adding redis host port %d", svccfg.Name, port)) + slog.Debug("adding redis host port", "service", svccfg.Name, "port", port) svccfg.Ports = []composeTypes.ServicePortConfig{{Target: port, Mode: Mode_HOST, Protocol: Protocol_TCP}} } else { fixupIngressPorts(svccfg) @@ -350,7 +350,7 @@ func fixupRedisService(svccfg *composeTypes.ServiceConfig, provider client.Provi func fixupIngressPorts(svccfg *composeTypes.ServiceConfig) { for i, port := range 
svccfg.Ports { if port.Mode == Mode_INGRESS || port.Mode == "" { - slog.Debug(fmt.Sprintf("service %q: changing port %d to host mode", svccfg.Name, port.Target)) + slog.Debug("changing port to host mode", "service", svccfg.Name, "port", port.Target) svccfg.Ports[i].Mode = Mode_HOST } } @@ -551,7 +551,7 @@ func fixupPort(port composeTypes.ServicePortConfig) composeTypes.ServicePortConf port.Mode = Mode_HOST } else { if port.Published != "" { - slog.Debug(fmt.Sprintf("port %d: ignoring 'published: %s' in 'ingress' mode", port.Target, port.Published)) + slog.Debug("ignoring 'published' in 'ingress' mode", "port", port.Target, "published", port.Published) } if port.AppProtocol == "" { // TCP ingress is not supported; assuming HTTP (add 'app_protocol: http' to silence)" diff --git a/src/pkg/cli/compose/loader.go b/src/pkg/cli/compose/loader.go index 0f42e286d..342cd263f 100644 --- a/src/pkg/cli/compose/loader.go +++ b/src/pkg/cli/compose/loader.go @@ -180,14 +180,14 @@ func (l *Loader) newProjectOptions(suppressWarn bool) (*cli.ProjectOptions, erro if inEnv && !suppressWarn { slog.Warn(fmt.Sprintf("Environment variable %q is ignored; add it to `.env` if needed", key)) } else { - slog.Debug(fmt.Sprintf("Unresolved environment variable %q", key)) + slog.Debug("Unresolved environment variable", "key", key) } return "", false } if inEnv && !suppressWarn { slog.Warn(fmt.Sprintf("Environment variable %q is ignored; add it to `.env` or it may be resolved from config during deployment", key)) } else { - slog.Debug(fmt.Sprintf("Environment variable %q was not resolved locally. It may be resolved from config during deployment", key)) + slog.Debug("Environment variable was not resolved locally. 
It may be resolved from config during deployment", "key", key) } // Leave unresolved variables as-is for resolution later by CD return "${" + key + "}", true diff --git a/src/pkg/cli/compose/serviceNameReplacer.go b/src/pkg/cli/compose/serviceNameReplacer.go index d11a893bd..a0d845c16 100644 --- a/src/pkg/cli/compose/serviceNameReplacer.go +++ b/src/pkg/cli/compose/serviceNameReplacer.go @@ -30,7 +30,7 @@ type ServiceNameReplacer struct { func NewServiceNameReplacer(ctx context.Context, dnsResolver client.DNSResolver, project *composeTypes.Project) ServiceNameReplacer { var skipPublicReplacement bool if err := dnsResolver.UpdateShardDomain(ctx); err != nil { - slog.Debug(fmt.Sprintf("failed to update shard domain: %v", err)) + slog.Debug("failed to update shard domain", "error", err) skipPublicReplacement = true } // Create a regexp to detect private service names in environment variable and build arg values @@ -89,9 +89,9 @@ func (s *ServiceNameReplacer) ReplaceServiceNameWithDNS(serviceName string, key, val := s.replaceServiceNameWithDNS(value) if val != value { - slog.Debug(fmt.Sprintf("service %q: service name was adjusted: %s %q assigned value %q", serviceName, fixupTarget, key, val)) + slog.Debug("service name was adjusted", "service", serviceName, "fixupTarget", fixupTarget, "key", key, "value", val) } else if s.publicServiceNames != nil && s.publicServiceNames.MatchString(value) { - slog.Debug(fmt.Sprintf("service %q: service name in the %s %q was not adjusted; only references to other services with port mode set to 'host' will be fixed-up", serviceName, fixupTarget, key)) + slog.Debug("service name was not adjusted; only references to other services with port mode set to 'host' will be fixed-up", "service", serviceName, "fixupTarget", fixupTarget, "key", key) } return val diff --git a/src/pkg/cli/compose/validation.go b/src/pkg/cli/compose/validation.go index 522b17e22..34eaa41b9 100644 --- a/src/pkg/cli/compose/validation.go +++ 
b/src/pkg/cli/compose/validation.go @@ -59,16 +59,16 @@ func ValidateProject(project *composeTypes.Project, mode modes.Mode) error { func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.Project, mode modes.Mode) error { if svccfg.ReadOnly { - slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: read_only", svccfg.Name)) + slog.Debug("service: unsupported compose directive: read_only", "service", svccfg.Name) } if svccfg.Restart == "" { // This was a warning, but we don't really care and want to reduce the noise - slog.Debug(fmt.Sprintf("service %q: missing compose directive: restart; assuming 'unless-stopped' (add 'restart' to silence)", svccfg.Name)) + slog.Debug("service: missing compose directive: restart; assuming 'unless-stopped' (add 'restart' to silence)", "service", svccfg.Name) } else if svccfg.Restart != "always" && svccfg.Restart != "unless-stopped" { - slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: restart; assuming 'unless-stopped' (add 'restart' to silence)", svccfg.Name)) + slog.Debug("service: unsupported compose directive: restart; assuming 'unless-stopped' (add 'restart' to silence)", "service", svccfg.Name) } if svccfg.ContainerName != "" { - slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: container_name", svccfg.Name)) + slog.Debug("service: unsupported compose directive: container_name", "service", svccfg.Name) } if svccfg.Hostname != "" { return fmt.Errorf("service %q: unsupported compose directive: hostname; consider using 'domainname' instead", svccfg.Name) @@ -77,7 +77,7 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P return fmt.Errorf("service %q: unsupported compose directive: dns_search", svccfg.Name) } if len(svccfg.DNSOpts) != 0 { - slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: dns_opt", svccfg.Name)) + slog.Debug("service: unsupported compose directive: dns_opt", "service", svccfg.Name) } if 
len(svccfg.DNS) != 0 { return fmt.Errorf("service %q: unsupported compose directive: dns", svccfg.Name) @@ -95,30 +95,30 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P return fmt.Errorf("service %q: unsupported compose directive: group_add", svccfg.Name) } if len(svccfg.Ipc) > 0 { - slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: ipc", svccfg.Name)) + slog.Debug("service: unsupported compose directive: ipc", "service", svccfg.Name) } if len(svccfg.Uts) > 0 { - slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: uts", svccfg.Name)) + slog.Debug("service: unsupported compose directive: uts", "service", svccfg.Name) } if svccfg.Isolation != "" { - slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: isolation", svccfg.Name)) + slog.Debug("service: unsupported compose directive: isolation", "service", svccfg.Name) } if svccfg.MacAddress != "" { - slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: mac_address", svccfg.Name)) + slog.Debug("service: unsupported compose directive: mac_address", "service", svccfg.Name) } if len(svccfg.Labels) > 0 { - slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: labels", svccfg.Name)) // TODO: add support for labels + slog.Debug("service: unsupported compose directive: labels", "service", svccfg.Name) // TODO: add support for labels } if len(svccfg.Links) > 0 { - slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: links", svccfg.Name)) + slog.Debug("service: unsupported compose directive: links", "service", svccfg.Name) } if svccfg.Logging != nil { - slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: logging", svccfg.Name)) + slog.Debug("service: unsupported compose directive: logging", "service", svccfg.Name) } for name := range svccfg.Networks { if _, ok := project.Networks[name]; !ok { // This was a warning, but we don't really care and want to reduce the noise - 
slog.Debug(fmt.Sprintf("service %q: network %q is not defined in the top-level networks section", svccfg.Name, name)) + slog.Debug("service: network is not defined in the top-level networks section", "service", svccfg.Name, "network", name) } } if len(svccfg.Volumes) > 0 { @@ -144,22 +144,22 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P return fmt.Errorf("service %q: unsupported compose directive: build ssh", svccfg.Name) } if len(svccfg.Build.Labels) != 0 { - slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: build labels", svccfg.Name)) // TODO: add support for Kaniko --label + slog.Debug("service: unsupported compose directive: build labels", "service", svccfg.Name) // TODO: add support for Kaniko --label } if len(svccfg.Build.CacheFrom) != 0 { - slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: build cache_from", svccfg.Name)) + slog.Debug("service: unsupported compose directive: build cache_from", "service", svccfg.Name) } if len(svccfg.Build.CacheTo) != 0 { - slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: build cache_to", svccfg.Name)) + slog.Debug("service: unsupported compose directive: build cache_to", "service", svccfg.Name) } if svccfg.Build.NoCache { - slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: build no_cache", svccfg.Name)) + slog.Debug("service: unsupported compose directive: build no_cache", "service", svccfg.Name) } if len(svccfg.Build.ExtraHosts) != 0 { return fmt.Errorf("service %q: unsupported compose directive: build extra_hosts", svccfg.Name) } if svccfg.Build.Isolation != "" { - slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: build isolation", svccfg.Name)) + slog.Debug("service: unsupported compose directive: build isolation", "service", svccfg.Name) } if svccfg.Build.Network != "" { return fmt.Errorf("service %q: unsupported compose directive: build network", svccfg.Name) @@ -193,7 +193,7 @@ func 
validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P // secret.Target will always be automatically constructed by compose-go to "/run/secrets/" if s, ok := project.Secrets[secret.Source]; !ok { // This was a warning, but we don't really care and want to reduce the noise - slog.Debug(fmt.Sprintf("secret %q is not defined in the top-level secrets section", secret.Source)) + slog.Debug("secret is not defined in the top-level secrets section", "secret", secret.Source) } else if s.Name != "" && s.Name != secret.Source { return fmt.Errorf("unsupported secret %q: cannot override name %q", secret.Source, s.Name) // TODO: support custom secret names } else if !s.External { @@ -213,7 +213,7 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P // show warning if sensitive information is detected if isSecret { slog.Warn(fmt.Sprintf("service %q: environment %q may contain sensitive information; consider using 'defang config set %s' to securely store this value", svccfg.Name, key, key)) - slog.Debug(fmt.Sprintf("service %q: environment %q may contain detected secrets of type: %v", svccfg.Name, key, ds)) + slog.Debug("service: environment may contain detected secrets", "service", svccfg.Name, "key", key, "types", ds) } } } @@ -250,10 +250,10 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P return fmt.Errorf("service %q: healthcheck timeout %fs must be positive and smaller than the interval %fs", svccfg.Name, timeout, interval) } if svccfg.HealthCheck.StartPeriod != nil { - slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: healthcheck start_period", svccfg.Name)) + slog.Debug("service: unsupported compose directive: healthcheck start_period", "service", svccfg.Name) } if svccfg.HealthCheck.StartInterval != nil { - slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: healthcheck start_interval", svccfg.Name)) + slog.Debug("service: unsupported compose directive: 
healthcheck start_interval", "service", svccfg.Name) } } var replicas int @@ -275,17 +275,17 @@ func validateService(svccfg *composeTypes.ServiceConfig, project *composeTypes.P return fmt.Errorf("service %q: unsupported compose directive: deploy endpoint_mode", svccfg.Name) } if svccfg.Deploy.Resources.Limits != nil && svccfg.Deploy.Resources.Reservations == nil { - slog.Debug(fmt.Sprintf("service %q: no reservations specified; using limits as reservations", svccfg.Name)) + slog.Debug("service: no reservations specified; using limits as reservations", "service", svccfg.Name) } reservations = getResourceReservations(svccfg.Deploy.Resources) if reservations != nil && reservations.NanoCPUs < 0 { // "0" just means "as small as possible" return fmt.Errorf("service %q: invalid value for cpus: %v", svccfg.Name, reservations.NanoCPUs) } if len(svccfg.Deploy.Labels) > 0 { - slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: deploy labels", svccfg.Name)) + slog.Debug("service: unsupported compose directive: deploy labels", "service", svccfg.Name) } if len(svccfg.Deploy.Placement.Constraints) != 0 || len(svccfg.Deploy.Placement.Preferences) != 0 || svccfg.Deploy.Placement.MaxReplicas != 0 { - slog.Debug(fmt.Sprintf("service %q: unsupported compose directive: deploy placement", svccfg.Name)) + slog.Debug("service: unsupported compose directive: deploy placement", "service", svccfg.Name) } if svccfg.Deploy.Replicas != nil { replicas = *svccfg.Deploy.Replicas diff --git a/src/pkg/cli/composeDown.go b/src/pkg/cli/composeDown.go index 44eae2bcf..18e6be75c 100644 --- a/src/pkg/cli/composeDown.go +++ b/src/pkg/cli/composeDown.go @@ -3,7 +3,6 @@ package cli import ( "context" "errors" - "fmt" "log/slog" "github.com/AlecAivazis/survey/v2" @@ -14,7 +13,7 @@ import ( ) func ComposeDown(ctx context.Context, projectName string, fabric client.FabricClient, provider client.Provider) (types.ETag, error) { - slog.Debug(fmt.Sprintf("Destroying project %q", projectName)) + 
slog.Debug("Destroying project", "project", projectName) // If no names are provided, destroy the entire project return CdCommand(ctx, projectName, provider, fabric, client.CdCommandDestroy) diff --git a/src/pkg/cli/configDelete.go b/src/pkg/cli/configDelete.go index 29cddef10..b7d90b871 100644 --- a/src/pkg/cli/configDelete.go +++ b/src/pkg/cli/configDelete.go @@ -2,7 +2,6 @@ package cli import ( "context" - "fmt" "log/slog" "github.com/DefangLabs/defang/src/pkg/cli/client" @@ -11,7 +10,7 @@ import ( ) func ConfigDelete(ctx context.Context, projectName string, provider client.Provider, names ...string) error { - slog.Debug(fmt.Sprintf("Deleting config %v in project %q", names, projectName)) + slog.Debug("Deleting config in project", "names", names, "project", projectName) if dryrun.DoDryRun { return dryrun.ErrDryRun diff --git a/src/pkg/cli/configList.go b/src/pkg/cli/configList.go index d94a9830b..7bcc707fd 100644 --- a/src/pkg/cli/configList.go +++ b/src/pkg/cli/configList.go @@ -2,7 +2,6 @@ package cli import ( "context" - "fmt" "log/slog" "github.com/DefangLabs/defang/src/pkg/cli/client" @@ -15,7 +14,7 @@ type PrintConfig struct { } func ConfigList(ctx context.Context, projectName string, provider client.Provider) error { - slog.Debug(fmt.Sprintf("Listing config in project %q", projectName)) + slog.Debug("Listing config", "project", projectName) config, err := provider.ListConfig(ctx, &defangv1.ListConfigsRequest{Project: projectName}) if err != nil { diff --git a/src/pkg/cli/configSet.go b/src/pkg/cli/configSet.go index 5e5032035..06da4c740 100644 --- a/src/pkg/cli/configSet.go +++ b/src/pkg/cli/configSet.go @@ -28,7 +28,7 @@ type ConfigManager interface { } func ConfigSet(ctx context.Context, projectName string, provider ConfigManager, name string, value string, options ConfigSetOptions) (bool, error) { - slog.Debug(fmt.Sprintf("Setting config %q in project %q", name, projectName)) + slog.Debug("Setting config", "name", name, "project", projectName) if 
!pkg.IsValidSecretName(name) { return false, ErrInvalidConfigName{Name: name} diff --git a/src/pkg/cli/connect.go b/src/pkg/cli/connect.go index 1d4d4a72f..e1b57fd33 100644 --- a/src/pkg/cli/connect.go +++ b/src/pkg/cli/connect.go @@ -15,7 +15,7 @@ import ( // Connect builds a client carrying the requested tenant (name or ID). func Connect(fabricAddr string, requestedTenant types.TenantNameOrID) *client.GrpcClient { host := client.NormalizeHost(fabricAddr) - slog.Debug(fmt.Sprintf("Using tenant %q for cluster %q", requestedTenant, host)) + slog.Debug("Using tenant for cluster", "tenant", requestedTenant, "cluster", host) accessToken := client.GetExistingToken(host) return client.NewGrpcClient(host, accessToken, requestedTenant) @@ -36,7 +36,7 @@ func ConnectWithTenant(ctx context.Context, fabricAddr string, requestedTenant t func NewProvider(ctx context.Context, providerID client.ProviderID, fabricClient client.FabricClient, stack string) client.Provider { var provider client.Provider - slog.Debug(fmt.Sprintf("Creating %s provider", providerID)) + slog.Debug("Creating provider", "provider", providerID) switch providerID { case client.ProviderAWS: provider = aws.NewByocProvider(ctx, fabricClient.GetTenantName(), stack) diff --git a/src/pkg/cli/estimate.go b/src/pkg/cli/estimate.go index 436a38956..45ad5e469 100644 --- a/src/pkg/cli/estimate.go +++ b/src/pkg/cli/estimate.go @@ -22,7 +22,7 @@ import ( ) func RunEstimate(ctx context.Context, project *compose.Project, client client.FabricClient, previewProvider client.Provider, estimateProviderID client.ProviderID, region string, mode modes.Mode) (*defangv1.EstimateResponse, error) { - slog.Debug(fmt.Sprintf("Running estimate for project %s in region %s with mode %s", project.Name, region, mode)) + slog.Debug("Running estimate for project", "project", project.Name, "region", region, "mode", mode) preview, err := GeneratePreview(ctx, project, client, previewProvider, estimateProviderID, mode, region) if err != nil { 
return nil, err diff --git a/src/pkg/cli/getServices.go b/src/pkg/cli/getServices.go index a50500b7b..c37e3fa17 100644 --- a/src/pkg/cli/getServices.go +++ b/src/pkg/cli/getServices.go @@ -52,7 +52,7 @@ func PrintLongServices(ctx context.Context, projectName string, provider client. } func GetServices(ctx context.Context, projectName string, provider client.Provider) ([]ServiceLineItem, error) { - slog.Debug(fmt.Sprintf("Listing services in project %q", projectName)) + slog.Debug("Listing services in project", "project", projectName) servicesResponse, err := provider.GetServices(ctx, &defangv1.GetServicesRequest{Project: projectName}) if err != nil { @@ -113,7 +113,7 @@ func GetHealthcheckResults(ctx context.Context, serviceInfos []*defangv1.Service defer wg.Done() result, err := RunHealthcheck(ctx, serviceInfo.Service.Name, "https://"+endpoint, serviceInfo.HealthcheckPath) if err != nil { - slog.Debug(fmt.Sprintf("Healthcheck error for service %q at endpoint %q: %s", serviceInfo.Service.Name, endpoint, err.Error())) + slog.Debug("Healthcheck error", "service", serviceInfo.Service.Name, "endpoint", endpoint, "err", err) result = "error" } *results[serviceInfo.Service.Name] = result @@ -136,17 +136,17 @@ func RunHealthcheck(ctx context.Context, name, endpoint, path string) (string, e if err != nil { return "", err } - slog.Debug(fmt.Sprintf("[%s] checking health at %s", name, url)) + slog.Debug("checking health", "service", name, "url", url) resp, err := http.DefaultClient.Do(req) if err != nil { return "", err } defer resp.Body.Close() if resp.StatusCode >= 200 && resp.StatusCode < 400 { - slog.Debug(fmt.Sprintf("[%s] ✔ healthy", name)) + slog.Debug("healthy", "service", name) return "healthy", nil } else { - slog.Debug(fmt.Sprintf("[%s] ✘ unhealthy (%s)", name, resp.Status)) + slog.Debug("unhealthy", "service", name, "status", resp.Status) return "unhealthy (" + resp.Status + ")", nil } } diff --git a/src/pkg/cli/new.go b/src/pkg/cli/new.go index 
e2492e66b..e3458e8ff 100644 --- a/src/pkg/cli/new.go +++ b/src/pkg/cli/new.go @@ -36,7 +36,7 @@ func FetchSamples(ctx context.Context) ([]Sample, error) { return nil, err } defer resp.Body.Close() - slog.Debug(fmt.Sprintf("%v", resp.Header)) + slog.Debug("Response header", "header", resp.Header) reader := resp.Body if resp.Header.Get("Content-Encoding") == "gzip" { reader, err = gzip.NewReader(resp.Body) @@ -69,7 +69,7 @@ func copyFromSamples(ctx context.Context, dir string, names []string, skipExisti return err } defer resp.Body.Close() - slog.Debug(fmt.Sprintf("%v", resp.Header)) + slog.Debug("Response header", "header", resp.Header) tarball, err := gzip.NewReader(resp.Body) if err != nil { return fmt.Errorf("failed to read tarball: %w", err) diff --git a/src/pkg/cli/subscribe.go b/src/pkg/cli/subscribe.go index f94e7d8cf..b3950bd3d 100644 --- a/src/pkg/cli/subscribe.go +++ b/src/pkg/cli/subscribe.go @@ -25,7 +25,7 @@ func WaitServiceState( etag types.ETag, services []string, ) (ServiceStates, error) { - slog.Debug(fmt.Sprintf("waiting for services %v to reach state %s\n", services, targetState)) // TODO: don't print in Go-routine + slog.Debug("waiting for services to reach state", "services", services, "state", targetState) // TODO: don't print in Go-routine if len(services) == 0 { return nil, ErrNothingToMonitor @@ -60,7 +60,7 @@ func WaitServiceState( if connect.CodeOf(err) == connect.CodeResourceExhausted { slog.WarnContext(ctx, fmt.Sprintf("quota exceeded; will retry subscribe stream after backoff: %v", err)) } else { - slog.Debug(fmt.Sprintf("WaitServiceState: transient error, reconnecting subscribe stream: %v", err)) + slog.Debug("WaitServiceState: transient error, reconnecting subscribe stream", "err", err) } if err := provider.DelayBeforeRetry(ctx); err != nil { return serviceStates, err @@ -89,10 +89,10 @@ func WaitServiceState( continue } - slog.Debug(fmt.Sprintf("Service update: %s: state=%s and status=%s\n", msg.Name, msg.State, msg.Status)) // TODO: 
don't print in Go-routine + slog.Debug("Service update", "name", msg.Name, "state", msg.State, "status", msg.Status) // TODO: don't print in Go-routine if _, ok := serviceStates[msg.Name]; !ok { - slog.Debug(fmt.Sprintf("unexpected service %s update", msg.Name)) // TODO: don't print in Go-routine + slog.Debug("unexpected service update", "name", msg.Name) // TODO: don't print in Go-routine continue } if msg.State == defangv1.ServiceState_NOT_SPECIFIED { diff --git a/src/pkg/cli/tail.go b/src/pkg/cli/tail.go index 654574690..72cc57f9d 100644 --- a/src/pkg/cli/tail.go +++ b/src/pkg/cli/tail.go @@ -147,7 +147,7 @@ func Tail(ctx context.Context, provider client.Provider, projectName string, opt options.LogType = logs.LogTypeAll } - slog.Debug(fmt.Sprintf("Tailing %s logs in project %q", options.LogType, projectName)) + slog.Debug("Tailing logs in project", "logType", options.LogType, "project", projectName) if options.Deployment != "" { _, err := types.ParseEtag(options.Deployment) diff --git a/src/pkg/cli/token.go b/src/pkg/cli/token.go index 83e160d2a..ee343af27 100644 --- a/src/pkg/cli/token.go +++ b/src/pkg/cli/token.go @@ -38,7 +38,7 @@ func Token(ctx context.Context, client client.FabricClient, tenant types.TenantN scopes = []string{string(s)} } - slog.Debug(fmt.Sprintf("Generating token for tenant %q with scopes %v", tenant, scopes)) + slog.Debug("Generating token for tenant", "tenant", tenant, "scopes", scopes) resp, err := client.Token(ctx, &defangv1.TokenRequest{ Assertion: at, diff --git a/src/pkg/cli/upgrade.go b/src/pkg/cli/upgrade.go index 70cdd9aec..62167f27e 100644 --- a/src/pkg/cli/upgrade.go +++ b/src/pkg/cli/upgrade.go @@ -3,7 +3,6 @@ package cli import ( "context" "errors" - "fmt" "log/slog" "os" "os/exec" @@ -20,13 +19,13 @@ func Upgrade(ctx context.Context) error { if err != nil { return err } - slog.Debug(fmt.Sprintf(" - Executable: %s\n", ex)) + slog.Debug("Executable path", "path", ex) ex, err = filepath.EvalSymlinks(ex) if err != nil { return 
err } - slog.Debug(fmt.Sprintf(" - Evaluated: %s\n", ex)) + slog.Debug("Evaluated executable path", "path", ex) if strings.HasPrefix(ex, "/nix/store/") { // Detect whether the user has used Flakes or nix-env diff --git a/src/pkg/cli/waitForCdTaskExit.go b/src/pkg/cli/waitForCdTaskExit.go index a7970c2af..e25aefacc 100644 --- a/src/pkg/cli/waitForCdTaskExit.go +++ b/src/pkg/cli/waitForCdTaskExit.go @@ -19,7 +19,7 @@ func WaitForCdTaskExit(ctx context.Context, provider client.Provider) error { select { case <-ticker.C: done, err := provider.GetDeploymentStatus(ctx) - // slog.Debug(fmt.Sprintf("Polled CD task status: done=%v, err=%v", done, err)) + // slog.Debug("Polled CD task status", "done", done, "err", err) if err != nil { // End condition: EOF indicates that the task has completed successfully if errors.Is(err, io.EOF) { diff --git a/src/pkg/clouds/aws/login.go b/src/pkg/clouds/aws/login.go index 55e917847..22ef3d2d4 100644 --- a/src/pkg/clouds/aws/login.go +++ b/src/pkg/clouds/aws/login.go @@ -75,7 +75,7 @@ func (p *awsOAuthCredentialsProvider) Retrieve(ctx context.Context) (awssdk.Cred slog.Debug("AWS OAuth access token expired, refreshing...") refreshed, err := refreshToken(ctx, p.cached) if err != nil { - slog.Debug(fmt.Sprintf("failed to refresh AWS OAuth token: %v", err)) + slog.Debug("failed to refresh AWS OAuth token", "error", err) return awssdk.Credentials{}, fmt.Errorf("refreshing AWS OAuth token: %w", err) } @@ -88,7 +88,7 @@ func (p *awsOAuthCredentialsProvider) Retrieve(ctx context.Context) (awssdk.Cred if err := p.tokenStore.Save(p.storeKey, string(tokenBytes)); err != nil { slog.WarnContext(ctx, fmt.Sprintf("failed to persist refreshed AWS OAuth token: %v", err)) } else { - slog.Debug(fmt.Sprintf("persisted refreshed AWS OAuth token for %q", p.storeKey)) + slog.Debug("persisted refreshed AWS OAuth token", "storeKey", p.storeKey) } } @@ -127,12 +127,12 @@ func (a *Aws) Authenticate(ctx context.Context, interactive bool) error { } // 1. 
Try default AWS credentials - slog.Debug(fmt.Sprintf("checking default AWS credentials for region %s...", a.Region)) + slog.Debug("checking default AWS credentials...", "region", a.Region) if _, err := a.testCredentials(ctx, nil); err != nil { if ctx.Err() != nil { // Fast fail if context is done, no need to try other credential sources return ctx.Err() } - slog.Debug(fmt.Sprintf("default AWS credentials invalid: %v", err)) + slog.Debug("default AWS credentials invalid", "error", err) } else { slog.Debug("found valid default AWS credentials") return nil @@ -206,13 +206,13 @@ func (a *Aws) findStoredCredentials(ctx context.Context) (awssdk.CredentialsProv for _, name := range tokenNames { tokenJSON, err := a.TokenStore.Load(name) if err != nil { - slog.Debug(fmt.Sprintf("failed to load token %q: %v", name, err)) + slog.Debug("failed to load token", "name", name, "error", err) continue } var cached awsTokenCache if err := json.Unmarshal([]byte(tokenJSON), &cached); err != nil { - slog.Debug(fmt.Sprintf("failed to unmarshal token %q: %v", name, err)) + slog.Debug("failed to unmarshal token", "name", name, "error", err) continue } @@ -222,11 +222,11 @@ func (a *Aws) findStoredCredentials(ctx context.Context) (awssdk.CredentialsProv } if cached.RefreshToken == "" && time.Now().After(cached.AccessToken.ExpiresAt) { - slog.Debug(fmt.Sprintf("token %q is expired and has no refresh token, skipping", name)) + slog.Debug("token is expired and has no refresh token, skipping", "name", name) continue } - slog.Debug(fmt.Sprintf("testing token %q (expires %s)...", name, cached.AccessToken.ExpiresAt.Format(time.RFC3339))) + slog.Debug("testing token...", "name", name, "expires", cached.AccessToken.ExpiresAt.Format(time.RFC3339)) provider := &awsOAuthCredentialsProvider{cached: &cached, tokenStore: a.TokenStore, storeKey: name} // Calling testCredentialsWithProfile triggers Retrieve(), which auto-refreshes @@ -237,7 +237,7 @@ func (a *Aws) findStoredCredentials(ctx context.Context) 
(awssdk.CredentialsProv if ctx.Err() != nil { return nil, ctx.Err() } - slog.Debug(fmt.Sprintf("token %q failed AWS_PROFILE role validation: %v, skipping...", name, err)) + slog.Debug("token failed AWS_PROFILE role validation, skipping...", "name", name, "error", err) continue } return creds, nil @@ -265,11 +265,11 @@ func (a *Aws) testCredentialsWithProfile(ctx context.Context, name string, creds if err != nil { slog.WarnContext(ctx, fmt.Sprintf("failed to compare token identity with AWS_PROFILE role: %v", err)) } else if same { - slog.Debug(fmt.Sprintf("token %q identity %q matches AWS_PROFILE role %q", name, *identity.Arn, roleArn)) + slog.Debug("token identity matches AWS_PROFILE role", "name", name, "arn", *identity.Arn, "roleArn", roleArn) return creds, nil } - slog.Debug(fmt.Sprintf("checking if token %q identity %q can assume AWS_PROFILE role %q", name, *identity.Arn, roleArn)) + slog.Debug("checking if token identity can assume AWS_PROFILE role", "name", name, "arn", *identity.Arn, "roleArn", roleArn) credCfg, err := LoadDefaultConfig(ctx, config.WithRegion(string(a.Region)), config.WithCredentialsProvider(creds)) if err != nil { return nil, err @@ -293,7 +293,7 @@ func (a *Aws) testCredentialsWithProfile(ctx context.Context, name string, creds return creds, nil } // If able to assume the profile role, use the assumed role credentials - slog.Debug(fmt.Sprintf("token %q is valid and can assume AWS_PROFILE role %q\n", name, roleArn)) + slog.Debug("token is valid and can assume AWS_PROFILE role", "name", name, "roleArn", roleArn) return assumeRoleProvider, nil } // If no AWS_PROFILE with role specified, any valid token is considered acceptable diff --git a/src/pkg/clouds/do/appPlatform/setup.go b/src/pkg/clouds/do/appPlatform/setup.go index 661e14bd7..d633223ed 100644 --- a/src/pkg/clouds/do/appPlatform/setup.go +++ b/src/pkg/clouds/do/appPlatform/setup.go @@ -90,7 +90,7 @@ func (d *DoApp) SetUpBucket(ctx context.Context) error { } func 
getImageSourceSpec(cdImagePath string) (*godo.ImageSourceSpec, error) { - slog.Debug(fmt.Sprintf("Using CD image: %q", cdImagePath)) + slog.Debug("Using CD image", "cdImagePath", cdImagePath) image, err := dockerhub.ParseImage(cdImagePath) if err != nil { return nil, err @@ -146,7 +146,7 @@ func (d DoApp) Run(ctx context.Context, env []*godo.AppVariableDefinition, cdIma appList, _, err := client.Apps.List(ctx, &godo.ListOptions{}) if err != nil { - slog.Debug(fmt.Sprintf("Error listing apps: %s", err)) + slog.Debug("Error listing apps", "error", err) } for _, app := range appList { diff --git a/src/pkg/clouds/gcp/api.go b/src/pkg/clouds/gcp/api.go index 059f694c9..600e79de9 100644 --- a/src/pkg/clouds/gcp/api.go +++ b/src/pkg/clouds/gcp/api.go @@ -27,7 +27,7 @@ func (gcp Gcp) EnsureAPIsEnabled(ctx context.Context, apis ...string) error { projectName := "projects/" + gcp.ProjectId for i := range maxAttempts { - slog.Debug(fmt.Sprintf("Enabling services: %v\n", apis)) + slog.Debug("Enabling services", "apis", apis) req := &serviceusage.BatchEnableServicesRequest{ ServiceIds: apis, } @@ -41,7 +41,7 @@ func (gcp Gcp) EnsureAPIsEnabled(ctx context.Context, apis ...string) error { } slog.ErrorContext(ctx, fmt.Sprintf("Error: %+v (%T)", err, err)) if i < maxAttempts-1 { - slog.Debug(fmt.Sprintf("Failed to enable services, will retry in %v: %v\n", retryInterval, err)) + slog.Debug("Failed to enable services, will retry", "retryInterval", retryInterval, "error", err) if err := pkg.SleepWithContext(ctx, retryInterval); err != nil { return err } @@ -58,7 +58,7 @@ func (gcp Gcp) EnsureAPIsEnabled(ctx context.Context, apis ...string) error { } else if op.Done { // Check if the operation is done if op.Error != nil { if i < maxAttempts-1 { - slog.Debug(fmt.Sprintf("Failed to enable services operation, will retry in %v: %v\n", retryInterval, op.Error)) + slog.Debug("Failed to enable services operation, will retry", "retryInterval", retryInterval, "error", op.Error) if err := 
pkg.SleepWithContext(ctx, retryInterval); err != nil { return err } diff --git a/src/pkg/clouds/gcp/iam.go b/src/pkg/clouds/gcp/iam.go index 0a54fe326..764a0542b 100644 --- a/src/pkg/clouds/gcp/iam.go +++ b/src/pkg/clouds/gcp/iam.go @@ -36,7 +36,7 @@ func (gcp Gcp) EnsureRoleExists(ctx context.Context, roleId, title, description role.GetTitle() == title && role.GetDescription() == description && role.Stage == iamadmpb.Role_GA { - slog.Debug(fmt.Sprintf("Role %s already exists", roleId)) + slog.Debug("Role already exists", "roleId", roleId) return role.Name, nil } @@ -68,7 +68,7 @@ func (gcp Gcp) EnsureRoleExists(ctx context.Context, roleId, title, description if err != nil { return "", fmt.Errorf("failed to create role: %w", err) } - slog.Debug(fmt.Sprintf("Role %s created successfully.", roleId)) + slog.Debug("Role created successfully", "roleId", roleId) } // Wait for the role to be created or updated @@ -102,7 +102,7 @@ func (gcp Gcp) EnsureServiceAccountExists(ctx context.Context, serviceAccountId, if err == nil { if account.GetDisplayName() == displayName && account.GetDescription() == description { - slog.Debug(fmt.Sprintf("Service account %s already exists", serviceAccountId)) + slog.Debug("Service account already exists", "serviceAccountId", serviceAccountId) return account.Name, nil } @@ -130,7 +130,7 @@ func (gcp Gcp) EnsureServiceAccountExists(ctx context.Context, serviceAccountId, return "", fmt.Errorf("failed to create service account: %w", err) } - slog.Debug(fmt.Sprintf("Service account %s created successfully.", serviceAccountId)) + slog.Debug("Service account created successfully", "serviceAccountId", serviceAccountId) accountName := account.Name for start := time.Now(); time.Since(start) < 5*time.Minute; { account, err = client.GetServiceAccount(ctx, &iamadmpb.GetServiceAccountRequest{Name: accountName}) @@ -188,7 +188,7 @@ func (gcp Gcp) EnsurePrincipalHasBucketRoles(ctx context.Context, bucketName, pr } if !needUpdate { - 
slog.Debug(fmt.Sprintf("Principal %s already has roles %v on bucket %s", principal, roles, bucketName)) + slog.Debug("Principal already has roles on bucket", "principal", principal, "roles", roles, "bucket", bucketName) return nil } @@ -345,7 +345,7 @@ func ensurePrincipalHasRolesWithResource(ctx context.Context, client resourceWit } if !bindingNeedsUpdate && len(rolesNotFound) == 0 { - slog.Debug(fmt.Sprintf("%s already has roles %v on resource %s", principal, roles, resource)) + slog.Debug("Principal already has roles on resource", "principal", principal, "roles", roles, "resource", resource) return nil } slog.InfoContext(ctx, "Updating IAM policy for resource "+resource) @@ -353,7 +353,7 @@ func ensurePrincipalHasRolesWithResource(ctx context.Context, client resourceWit for i := range maxAttempts { // Service account might not be visible for a few seconds after creation for policy attachment if _, err := client.SetIamPolicy(ctx, &iampb.SetIamPolicyRequest{Resource: resource, Policy: policy}); err != nil { if i < maxAttempts-1 { - slog.Debug(fmt.Sprintf("Failed to set IAM policy for resource %s, will retry in %v: %v\n", resource, retryInterval, err)) + slog.Debug("Failed to set IAM policy for resource, will retry", "resource", resource, "retryInterval", retryInterval, "error", err) if err := pkg.SleepWithContext(ctx, retryInterval); err != nil { return err } diff --git a/src/pkg/clouds/gcp/login.go b/src/pkg/clouds/gcp/login.go index 5613e13d8..c5ac20589 100644 --- a/src/pkg/clouds/gcp/login.go +++ b/src/pkg/clouds/gcp/login.go @@ -111,12 +111,12 @@ func (gcp *Gcp) Authenticate(ctx context.Context, interactive bool) error { // 1. 
Try the default application credentials or from the "GOOGLE_APPLICATION_CREDENTIALS" env var if set // - if the user has login with glcoud cli with application default credentials // - if the user has set GOOGLE_APPLICATION_CREDENTIALS to a service account key file with required permissions - slog.Debug(fmt.Sprintf("checking if application default credentials are available and has permission, GOOGLE_APPLICATION_CREDENTIALS=%q...", os.Getenv("GOOGLE_APPLICATION_CREDENTIALS"))) + slog.Debug("checking if application default credentials are available and has permission...", "GOOGLE_APPLICATION_CREDENTIALS", os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")) if err := testTokenProjectPermissions(ctx, gcp.ProjectId, requiredPerms, nil); err != nil { if ctx.Err() != nil { // Fast fail if context is done, no need to try other credential sources return ctx.Err() } - slog.Debug(fmt.Sprintf("the application default credentials are missing permissions: %v", err)) + slog.Debug("the application default credentials are missing permissions", "error", err) } else { slog.Debug("found valid application default credentials with required permissions") // No need to pass down ADC token source via options since ADC is automatically used by gcp sdk @@ -230,16 +230,16 @@ func (gcp *Gcp) findStoredCredentials(ctx context.Context) (oauth2.TokenSource, slog.WarnContext(ctx, fmt.Sprintf("failed to parse previously saved auth token %q: %v", name, err)) continue } - slog.Debug(fmt.Sprintf("Testing token %q from store for required permissions...", name)) + slog.Debug("Testing token from store for required permissions...", "name", name) tokenSource := config.TokenSource(ctx, &token) if err := testTokenProjectPermissions(ctx, gcp.ProjectId, requiredPerms, tokenSource); err == nil { - slog.Debug(fmt.Sprintf("Token %q is valid and has required permissions\n", name)) + slog.Debug("Token is valid and has required permissions", "name", name) currentToken, err := tokenSource.Token() if err != nil { return nil, 
fmt.Errorf("failed to retrieve current token from token source: %w", err) } if currentToken.AccessToken != token.AccessToken || currentToken.Expiry != token.Expiry || currentToken.RefreshToken != token.RefreshToken { - slog.Debug(fmt.Sprintf("Token %q has been updated, persisting updated token...\n", name)) + slog.Debug("Token has been updated, persisting updated token...", "name", name) bytes, err := json.Marshal(currentToken) if err != nil { return nil, fmt.Errorf("failed to marshal updated token: %w", err) @@ -255,7 +255,7 @@ func (gcp *Gcp) findStoredCredentials(ctx context.Context) (oauth2.TokenSource, if ctx.Err() != nil { return nil, ctx.Err() } - slog.Debug(fmt.Sprintf("Token %q is missing required permissions: %v\n", name, err)) + slog.Debug("Token is missing required permissions", "name", name, "error", err) continue } } @@ -266,7 +266,7 @@ func findGithubCredentials(ctx context.Context) (oauth2.TokenSource, string, err // If both ACTIONS_ID_TOKEN_REQUEST_URL and GOOGLE_WORKLOAD_IDENTITY_PROVIDER are set, we're doing "Workload Identity Federation" with GCP using github id token githubTokenReqUrl := os.Getenv("ACTIONS_ID_TOKEN_REQUEST_URL") gcpProvider := os.Getenv("GOOGLE_WORKLOAD_IDENTITY_PROVIDER") - slog.Debug(fmt.Sprintf("ACTIONS_ID_TOKEN_REQUEST_URL=%q, GOOGLE_WORKLOAD_IDENTITY_PROVIDER=%q", githubTokenReqUrl, gcpProvider)) + slog.Debug("GitHub Actions environment variables", "ACTIONS_ID_TOKEN_REQUEST_URL", githubTokenReqUrl, "GOOGLE_WORKLOAD_IDENTITY_PROVIDER", gcpProvider) if githubTokenReqUrl == "" || gcpProvider == "" { return nil, "", nil } diff --git a/src/pkg/clouds/gcp/storage.go b/src/pkg/clouds/gcp/storage.go index eea8d70b0..248175764 100644 --- a/src/pkg/clouds/gcp/storage.go +++ b/src/pkg/clouds/gcp/storage.go @@ -38,7 +38,7 @@ func (gcp Gcp) EnsureBucketExists(ctx context.Context, prefix string, versioning return "", fmt.Errorf("failed to get bucket with prefix %q: %w", prefix, err) } if existing != "" { - slog.Debug(fmt.Sprintf("Bucket 
%q already exists\n", existing)) + slog.Debug("Bucket already exists", "bucket", existing) err := gcp.UpdateBucketVersioning(ctx, existing, versioning) if err != nil { return "", fmt.Errorf("failed to ensure versioning is enabled on existing bucket %q: %w", existing, err) diff --git a/src/pkg/github/version.go b/src/pkg/github/version.go index 7b338643d..eaf058e38 100644 --- a/src/pkg/github/version.go +++ b/src/pkg/github/version.go @@ -35,12 +35,12 @@ func GetLatestReleaseTag(ctx context.Context) (string, error) { } defer resp.Body.Close() if resp.StatusCode != 200 { - slog.Debug(fmt.Sprintf("%v", resp.Header)) + slog.Debug("unexpected status", "header", resp.Header) // The primary rate limit for unauthenticated requests is 60 requests per hour, per IP. // The API returns a 403 status code when the rate limit is exceeded. githubError := githubError{Message: resp.Status} if err := json.NewDecoder(resp.Body).Decode(&githubError); err != nil { - slog.Debug(fmt.Sprintf("Failed to decode GitHub response: %v", err)) + slog.Debug("Failed to decode GitHub response", "error", err) } return "", fmt.Errorf("error fetching release info from GitHub: %s", githubError.Message) } diff --git a/src/pkg/mcp/mcp_server.go b/src/pkg/mcp/mcp_server.go index f0fb252ca..75638dbfc 100644 --- a/src/pkg/mcp/mcp_server.go +++ b/src/pkg/mcp/mcp_server.go @@ -30,7 +30,7 @@ type ToolTracker struct { func (t *ToolTracker) TrackTool(name string, handler server.ToolHandlerFunc) server.ToolHandlerFunc { return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { name := request.Params.Name - slog.Debug("MCP Tool Called: " + name + " with params: " + fmt.Sprintf("%+v", request.Params)) + slog.Debug("MCP Tool Called", "name", name, "params", request.Params) track.Evt("MCP Tool Called", track.P("tool", name), track.P("client", t.client), track.P("cluster", t.fabricAddr), track.P("provider", *t.providerId)) resp, err := handler(ctx, request) if err != nil { diff --git 
a/src/pkg/mcp/utils.go b/src/pkg/mcp/utils.go index 42f249bb2..5a65c6ab6 100644 --- a/src/pkg/mcp/utils.go +++ b/src/pkg/mcp/utils.go @@ -21,7 +21,7 @@ var knowledgeBaseFilenames = [...]string{"knowledge_base.json", "samples_example func SetupKnowledgeBase() error { slog.Debug("Setting up knowledge base") - slog.Debug(fmt.Sprintf("Attempting to download knowledge base files: %v", knowledgeBaseFilenames)) + slog.Debug("Attempting to download knowledge base files", "files", knowledgeBaseFilenames) // Create knowledge base directory if it doesn't exist slog.Debug("Creating knowledge base directory: " + KnowledgeBaseDir) diff --git a/src/pkg/migrate/heroku.go b/src/pkg/migrate/heroku.go index 9390475f6..d820ef137 100644 --- a/src/pkg/migrate/heroku.go +++ b/src/pkg/migrate/heroku.go @@ -37,7 +37,7 @@ func collectHerokuApplicationInfo(ctx context.Context, client HerokuClientInterf } applicationInfo.Dynos = dynos - slog.Debug(fmt.Sprintf("Dynos for the selected application: %+v\n", dynos)) + slog.Debug("Dynos for the selected application", "dynos", dynos) dynoSizes := make(map[string]HerokuDynoSize) for _, dyno := range dynos { @@ -49,7 +49,7 @@ func collectHerokuApplicationInfo(ctx context.Context, client HerokuClientInterf } applicationInfo.DynoSizes = dynoSizes - slog.Debug(fmt.Sprintf("Dyno sizes for the selected application: %+v\n", dynoSizes)) + slog.Debug("Dyno sizes for the selected application", "dynoSizes", dynoSizes) releaseTasks, err := client.GetReleaseTasks(ctx, appName) if err != nil { @@ -57,7 +57,7 @@ func collectHerokuApplicationInfo(ctx context.Context, client HerokuClientInterf } applicationInfo.ReleaseTasks = releaseTasks - slog.Debug(fmt.Sprintf("Release tasks for the selected application: %+v\n", releaseTasks)) + slog.Debug("Release tasks for the selected application", "releaseTasks", releaseTasks) slog.InfoContext(ctx, "Identifying configured addons") addons, err := client.ListAddons(ctx, appName) @@ -65,7 +65,7 @@ func 
collectHerokuApplicationInfo(ctx context.Context, client HerokuClientInterf return HerokuApplicationInfo{}, fmt.Errorf("failed to list Heroku addons: %w", err) } applicationInfo.Addons = addons - slog.Debug(fmt.Sprintf("Addons for the selected application: %+v\n", addons)) + slog.Debug("Addons for the selected application", "addons", addons) for _, addon := range addons { if addon.AddonService.Name == "heroku-postgresql" { @@ -77,7 +77,7 @@ func collectHerokuApplicationInfo(ctx context.Context, client HerokuClientInterf } } - slog.Debug(fmt.Sprintf("Postgres info for the selected application: %+v\n", applicationInfo.PGInfo)) + slog.Debug("Postgres info for the selected application", "pgInfo", applicationInfo.PGInfo) configVars, err := client.ListConfigVars(ctx, appName) if err != nil { @@ -327,7 +327,7 @@ func authenticateHerokuCLI() error { cmd.Stdin = bytes.NewBuffer([]byte{'\n'}) _, err = cmd.Output() if err != nil { - slog.Debug(fmt.Sprintf("Failed to run `heroku login`: %v", err)) + slog.Debug("Failed to run `heroku login`", "err", err) return err } @@ -349,7 +349,7 @@ func getHerokuAuthTokenFromCLI() (string, error) { slog.Info("The Heroku CLI is installed, we'll use it to generate a short-lived authorization token") err = authenticateHerokuCLI() if err != nil { - slog.Debug(fmt.Sprintf("Failed to authenticate Heroku CLI: %v", err)) + slog.Debug("Failed to authenticate Heroku CLI", "err", err) return "", err } slog.Debug("Successfully authenticated with Heroku") @@ -357,7 +357,7 @@ func getHerokuAuthTokenFromCLI() (string, error) { cmd := exec.Command("heroku", "authorizations:create", "--expires-in=300", "--json") output, err := cmd.Output() if err != nil { - slog.Debug(fmt.Sprintf("Failed to run `heroku authorizations:create`: %v", err)) + slog.Debug("Failed to run `heroku authorizations:create`", "err", err) return "", err } @@ -370,7 +370,7 @@ func getHerokuAuthTokenFromCLI() (string, error) { } err = json.Unmarshal(output, &result) if err != nil { - 
slog.Debug(fmt.Sprintf("Failed to parse Heroku CLI output: %v", err)) + slog.Debug("Failed to parse Heroku CLI output", "err", err) return "", err } if result.AccessToken.Token == "" { diff --git a/src/pkg/migrate/migrate.go b/src/pkg/migrate/migrate.go index 4050aee6c..44bd09c1d 100644 --- a/src/pkg/migrate/migrate.go +++ b/src/pkg/migrate/migrate.go @@ -26,7 +26,7 @@ func InteractiveSetup(ctx context.Context, fabric client.FabricClient, surveyor sourcePlatform = selected } - slog.Debug(fmt.Sprintf("Selected source platform: %s", sourcePlatform)) + slog.Debug("Selected source platform", "sourcePlatform", sourcePlatform) var composeFileContents string var err error @@ -56,7 +56,7 @@ func setupFromHeroku(ctx context.Context, fabric client.FabricClient, surveyor s // Here you can add logic to process the retrieved apps and set up the project accordingly // For now, we just print the apps - slog.Debug(fmt.Sprintf("Your Heroku applications: %+v\n", apps)) + slog.Debug("Your Heroku applications", "apps", apps) appNames := make([]string, len(apps)) for i, app := range apps { @@ -75,14 +75,14 @@ func setupFromHeroku(ctx context.Context, fabric client.FabricClient, surveyor s return "", fmt.Errorf("failed to collect Heroku application info: %w", err) } - slog.Debug(fmt.Sprintf("Application info: %+v\n", applicationInfo)) + slog.Debug("Application info", "applicationInfo", applicationInfo) sanitizedApplicationInfo, err := sanitizeHerokuApplicationInfo(applicationInfo) if err != nil { return "", fmt.Errorf("failed to sanitize Heroku application info: %w", err) } - slog.Debug(fmt.Sprintf("Sanitized application info: %+v\n", sanitizedApplicationInfo)) + slog.Debug("Sanitized application info", "sanitizedApplicationInfo", sanitizedApplicationInfo) slog.InfoContext(ctx, "Generating compose file...") @@ -129,7 +129,7 @@ func generateComposeFile(ctx context.Context, fabric client.FabricClient, platfo } responseStr := string(resp.Compose) - slog.Debug(fmt.Sprintf("Received compose 
response: %+v", responseStr)) + slog.Debug("Received compose response", "response", responseStr) // assume the response is markdown, // extract the contents of the first code block if there is one @@ -140,7 +140,7 @@ func generateComposeFile(ctx context.Context, fabric client.FabricClient, platfo composeContent = responseStr } else { previousError = err.Error() - slog.Debug(fmt.Sprintf("Failed to extract code block: %v. Retrying...", err)) + slog.Debug("Failed to extract code block. Retrying...", "err", err) continue } } @@ -156,7 +156,7 @@ func generateComposeFile(ctx context.Context, fabric client.FabricClient, platfo _, err = compose.LoadFromContentWithInterpolation(ctx, []byte(composeContent), projectName) if err != nil { previousError = err.Error() - slog.Debug(fmt.Sprintf("Invalid compose file received: %v. Retrying...", err)) + slog.Debug("Invalid compose file received. Retrying...", "err", err) continue } diff --git a/src/pkg/track/track.go b/src/pkg/track/track.go index c1efde73f..dc63f5be9 100644 --- a/src/pkg/track/track.go +++ b/src/pkg/track/track.go @@ -1,7 +1,6 @@ package track import ( - "fmt" "log/slog" "strings" "sync" @@ -41,10 +40,10 @@ func Evt(name string, props ...Property) { } tracker := Tracker if tracker == nil { - slog.Debug(fmt.Sprintf("untracked event %q: %v", name, props)) + slog.Debug("untracked event", "name", name, "props", props) return } - slog.Debug(fmt.Sprintf("tracking event %q: %v", name, props)) + slog.Debug("tracking event", "name", name, "props", props) trackWG.Add(1) go func() { defer trackWG.Done()