Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 19 additions & 0 deletions src/api/pc_api.go
Original file line number Diff line number Diff line change
Expand Up @@ -250,6 +250,25 @@ func (api *PcApi) RestartProcess(c *gin.Context) {
c.JSON(http.StatusOK, gin.H{"name": name})
}

// @Schemes
// @Id RestartAllProcesses
// @Description Restarts all processes
// @Tags Process
// @Summary Restart all processes
// @Produce json
// @Success 200 {object} api.StatusResponse "Restart All Status"
// @Failure 400 {object} map[string]string
// @Router /processes/restart [post]
func (api *PcApi) RestartAllProcesses(c *gin.Context) {
	// Delegate to the project runner; any failure is surfaced as a 400 with
	// the error text in the body.
	if err := api.project.RestartAllProcesses(); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}

	c.JSON(http.StatusOK, gin.H{"status": "restarted"})
}

// @Schemes
// @Id ScaleProcess
// @Description Scale a process
Expand Down
1 change: 1 addition & 0 deletions src/api/routes.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ func InitRoutes(useLogger bool, handler *PcApi) *gin.Engine {
r.PATCH("/processes/stop", handler.StopProcesses)
r.POST("/process/start/:name", handler.StartProcess)
r.POST("/process/restart/:name", handler.RestartProcess)
r.POST("/processes/restart", handler.RestartAllProcesses)
r.POST("/project/stop", handler.ShutDownProject)
r.POST("/project", handler.UpdateProject)
r.POST("/project/configuration", handler.ReloadProject)
Expand Down
1 change: 1 addition & 0 deletions src/app/project_interface.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ type IProject interface {
StopProcesses(names []string) (map[string]string, error)
StartProcess(name string) error
RestartProcess(name string) error
RestartAllProcesses() error
ScaleProcess(name string, scale int) error
GetProcessPorts(name string) (*types.ProcessPorts, error)
SetProcessPassword(name string, password string) error
Expand Down
110 changes: 110 additions & 0 deletions src/app/project_runner.go
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,8 @@ type ProjectRunner struct {
procCompleteChannel chan int
processTree *ProcessTree
processScheduler *scheduler.Scheduler
isRestartingAll bool
restartAllMutex sync.Mutex
}

// RestartCall represents an in-flight restart operation
Expand Down Expand Up @@ -161,6 +163,14 @@ func (p *ProjectRunner) Run() error {
case runProcCount := <-p.procCompleteChannel:
log.Debug().Msgf("Remaining processes: %d", runProcCount)
if runProcCount == 0 {
// Check if a restart-all operation is in progress
p.restartAllMutex.Lock()
isRestarting := p.isRestartingAll
p.restartAllMutex.Unlock()
if isRestarting {
log.Debug().Msg("Restart all in progress, not exiting")
continue
}
if p.processScheduler == nil || len(p.processScheduler.GetScheduledProcesses()) == 0 {
log.Info().Msg("Project completed")
p.exitCodeMutex.Lock()
Expand Down Expand Up @@ -561,6 +571,106 @@ func (p *ProjectRunner) doRestart(name string) error {
return nil
}

func (p *ProjectRunner) RestartAllProcesses() error {
log.Info().Msg("Restarting all processes")

// Set flag to prevent main loop from exiting when process count reaches 0
p.restartAllMutex.Lock()
p.isRestartingAll = true
p.restartAllMutex.Unlock()

// Build shutdown order
p.runProcMutex.Lock()
shutdownOrder := []*Process{}
if p.isOrderedShutdown {
err := p.project.WithProcesses([]string{}, func(process types.ProcessConfig) error {
if runningProc, ok := p.runningProcesses[process.ReplicaName]; ok {
shutdownOrder = append(shutdownOrder, runningProc)
}
return nil
})
if err != nil {
log.Error().Msgf("Failed to build project run order: %s", err.Error())
}
slices.Reverse(shutdownOrder)
} else {
for _, proc := range p.runningProcesses {
shutdownOrder = append(shutdownOrder, proc)
}
}
p.runProcMutex.Unlock()

var nameOrder []string
for _, v := range shutdownOrder {
nameOrder = append(nameOrder, v.getName())
}
log.Debug().Msgf("Stopping %d processes for restart. Order: %q", len(shutdownOrder), nameOrder)

// Prepare all processes for shutdown (prevents auto-restart)
for _, proc := range shutdownOrder {
proc.prepareForShutDown()
}

// Stop all processes
p.shutDownAndWait(shutdownOrder)
Comment on lines +601 to +615
Copy link

Copilot AI Jan 24, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The restartAllMutex is used to protect the isRestartingAll flag, but there's a potential race condition window. Between lines 601 and 609, the runProcMutex is released but processes haven't been shut down yet. If another operation tries to interact with processes during this window, it might see inconsistent state. Consider holding the runProcMutex for the entire shutdown phase, or document why this early release is safe.

Copilot uses AI. Check for mistakes.

// Clear done processes map
p.doneProcMutex.Lock()
p.doneProcesses = make(map[string]*Process)
Comment on lines +617 to +619
Copy link

Copilot AI Jan 24, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Clearing the doneProcesses map during restart-all could cause issues if there are references to these Process objects elsewhere in the code. Before clearing, verify that all observers and references to done processes are properly cleaned up. Consider whether the processes in doneProcesses should be explicitly cleaned up before clearing the map to avoid potential resource leaks.

Suggested change
// Clear done processes map
p.doneProcMutex.Lock()
p.doneProcesses = make(map[string]*Process)
// Clear done processes map (in-place to avoid changing map identity)
p.doneProcMutex.Lock()
for name := range p.doneProcesses {
delete(p.doneProcesses, name)
}

Copilot uses AI. Check for mistakes.
p.doneProcMutex.Unlock()

// Reset process states to pending
p.statesMutex.Lock()
for name, state := range p.processStates {
state.Status = types.ProcessStatePending
state.Pid = 0
state.ExitCode = 0
state.IsRunning = false
Copy link

Copilot AI Jan 24, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The process state reset sets all states to Pending and clears runtime information, but it doesn't reset other fields like SystemTime, Health, Mem, CPU, or Restarts. This could lead to stale information being displayed in the UI immediately after restart. Consider whether these fields should also be reset, or if there's a mechanism to refresh them when processes start.

Suggested change
state.IsRunning = false
state.IsRunning = false
state.SystemTime = ""
state.Health = ""
state.Mem = 0
state.CPU = 0
state.Restarts = 0

Copilot uses AI. Check for mistakes.
p.processStates[name] = state
}
p.statesMutex.Unlock()

// Build run order (same as initial Run())
runOrder := []types.ProcessConfig{}
err := p.project.WithProcesses([]string{}, func(process types.ProcessConfig) error {
if process.IsDeferred() {
return nil
}
runOrder = append(runOrder, process)
return nil
})
if err != nil {
// Clear the restart flag on error
p.restartAllMutex.Lock()
p.isRestartingAll = false
p.restartAllMutex.Unlock()
return fmt.Errorf("failed to build project run order: %w", err)
Comment on lines +643 to +647
Copy link

Copilot AI Jan 24, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If an error occurs while building the run order (line 641), the restart flag is properly cleared, but the system is left in an inconsistent state - all processes have been stopped but won't be restarted. Consider whether partial recovery is possible (e.g., attempting to restart processes that were successfully configured) or if this error scenario should trigger a more graceful degradation.

Suggested change
// Clear the restart flag on error
p.restartAllMutex.Lock()
p.isRestartingAll = false
p.restartAllMutex.Unlock()
return fmt.Errorf("failed to build project run order: %w", err)
// If we could not collect any process at all, treat this as fatal
if len(runOrder) == 0 {
// Clear the restart flag on error
p.restartAllMutex.Lock()
p.isRestartingAll = false
p.restartAllMutex.Unlock()
return fmt.Errorf("failed to build project run order: %w", err)
}
// Otherwise, log the error but continue with the processes we did collect
log.Error().Err(err).Msg("failed to fully build project run order; proceeding with partial run order")

Copilot uses AI. Check for mistakes.
}

var startOrder []string
for _, v := range runOrder {
startOrder = append(startOrder, v.ReplicaName)
}
log.Debug().Msgf("Starting %d processes. Order: %q", len(runOrder), startOrder)

// Start all processes
for _, proc := range runOrder {
if proc.Schedule != nil && proc.Schedule.IsScheduled() {
continue
}
newConf := proc
p.runProcess(&newConf)
}

// Clear the restart flag now that new processes are started
p.restartAllMutex.Lock()
p.isRestartingAll = false
p.restartAllMutex.Unlock()

log.Info().Msg("All processes restarted")
return nil
}

func (p *ProjectRunner) GetProcessInfo(name string) (*types.ProcessConfig, error) {
p.runProcMutex.Lock()
defer p.runProcMutex.Unlock()
Expand Down
4 changes: 4 additions & 0 deletions src/client/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -132,6 +132,10 @@ func (p *PcClient) RestartProcess(name string) error {
return p.restartProcess(name)
}

// RestartAllProcesses asks the remote process-compose server to restart every
// process, delegating to the HTTP implementation in restart.go.
func (p *PcClient) RestartAllProcesses() error {
	return p.restartAllProcesses()
}

func (p *PcClient) ScaleProcess(name string, scale int) error {
return p.scaleProcess(name, scale)
}
Expand Down
21 changes: 20 additions & 1 deletion src/client/restart.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,9 @@ import (
"encoding/json"
"errors"
"fmt"
"github.com/rs/zerolog/log"
"net/http"

"github.com/rs/zerolog/log"
)

func (p *PcClient) restartProcess(name string) error {
Expand All @@ -25,3 +26,21 @@ func (p *PcClient) restartProcess(name string) error {
}
return errors.New(respErr.Error)
}

// restartAllProcesses issues POST /processes/restart to the server and
// translates non-200 responses into errors. On success it returns nil; on
// failure it returns the server-reported error, or — if the error body cannot
// be decoded — an error carrying the HTTP status for diagnosability.
func (p *PcClient) restartAllProcesses() error {
	url := fmt.Sprintf("http://%s/processes/restart", p.address)
	resp, err := p.client.Post(url, "application/json", nil)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusOK {
		return nil
	}
	var respErr pcError
	if err = json.NewDecoder(resp.Body).Decode(&respErr); err != nil {
		log.Error().Msgf("failed to decode restart all processes response: %v", err)
		// Include the HTTP status: the bare decode error would otherwise hide
		// what the server actually answered.
		return fmt.Errorf("restart all processes failed with status %s: %w", resp.Status, err)
	}
	return errors.New(respErr.Error)
}
6 changes: 6 additions & 0 deletions src/tui/actions.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ const (
ActionProcessInfo = ActionName("process_info")
ActionProcessStop = ActionName("process_stop")
ActionProcessRestart = ActionName("process_restart")
ActionProcessRestartAll = ActionName("process_restart_all")
ActionProcessScreen = ActionName("process_screen")
ActionQuit = ActionName("quit")
ActionLogFind = ActionName("find")
Expand Down Expand Up @@ -57,6 +58,7 @@ var defaultShortcuts = map[ActionName]tcell.Key{
ActionProcessStart: tcell.KeyF7,
ActionProcessStop: tcell.KeyF9,
ActionProcessRestart: tcell.KeyCtrlR,
ActionProcessRestartAll: tcell.KeyCtrlU,
ActionProcessScreen: tcell.KeyF8,
ActionQuit: tcell.KeyF10,
ActionLogFind: tcell.KeyCtrlF,
Expand Down Expand Up @@ -109,6 +111,7 @@ var procActionsOrder = []ActionName{
ActionProcessScreen,
ActionProcessStop,
ActionProcessRestart,
ActionProcessRestartAll,
ActionEditProcess,
ActionReloadConfig,
ActionNsFilter,
Expand Down Expand Up @@ -342,6 +345,9 @@ func newShortCuts() *ShortCuts {
ActionProcessRestart: {
Description: "Restart",
},
ActionProcessRestartAll: {
Description: "Restart All",
},
ActionQuit: {
Description: "Quit",
},
Expand Down
73 changes: 73 additions & 0 deletions src/tui/all-logs-observer.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
package tui

import (
"fmt"
"hash/fnv"
"math"

"github.com/f1bonacc1/process-compose/src/pclog"
)

// tviewColors is a list of tview color names for process name prefixes.
// With 12 entries, distinct processes may share a color in larger projects;
// the prefix is for visual grouping, not unique identification.
var tviewColors = []string{
	"red",
	"green",
	"yellow",
	"blue",
	"magenta",
	"cyan",
	"orange",
	"pink",
	"lime",
	"aqua",
	"violet",
	"gold",
}

// getProcessColor returns a deterministic tview color name for the given
// process name, chosen by FNV-1a hash so the same process always gets the
// same color across redraws and sessions.
func getProcessColor(name string) string {
	hash := fnv.New32a()
	hash.Write([]byte(name))
	// Reduce in uint32: converting Sum32() to int first would go negative on
	// 32-bit platforms and panic with an out-of-range index.
	return tviewColors[hash.Sum32()%uint32(len(tviewColors))]
}
Comment on lines +12 to +32
Copy link

Copilot AI Jan 24, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The hash-based color assignment uses FNV-1a hash to deterministically assign colors to process names. With 12 colors available, processes will inevitably share colors in larger projects. This is acceptable for visual distinction, but consider documenting this limitation. Also, some color combinations (e.g., "yellow" on light backgrounds) may have poor readability - consider testing with different terminal themes or allowing color customization.

Copilot uses AI. Check for mistakes.

// AllLogsObserver wraps a LogView and prefixes log lines with a colored process name.
// It implements pclog.LogObserver interface.
type AllLogsObserver struct {
	processName string   // process whose log lines this observer relays
	logView     *LogView // destination view that renders the prefixed lines
	color       string   // tview color name derived from processName (see getProcessColor)
	uniqueID    string   // random suffix keeping observer IDs distinct per subscription
}

// NewAllLogsObserver builds an observer that relays the given process's log
// lines into logView, prefixing each with a deterministic per-process color.
func NewAllLogsObserver(processName string, logView *LogView) *AllLogsObserver {
	observer := &AllLogsObserver{
		processName: processName,
		logView:     logView,
		color:       getProcessColor(processName),
		uniqueID:    pclog.GenerateUniqueID(10),
	}
	return observer
}

// WriteString writes a log line prefixed with the colored process name.
// It delegates to the LogView, which performs the actual prefixing and
// rendering; the returned count and error come straight from that call.
func (o *AllLogsObserver) WriteString(line string) (n int, err error) {
	return o.logView.WriteStringWithProcess(line, o.processName, o.color)
}

// SetLines replays a batch of historical log lines through WriteString so
// each one receives the colored process-name prefix. Write errors are
// intentionally ignored: a failed line must not abort the replay.
func (o *AllLogsObserver) SetLines(lines []string) {
	for i := range lines {
		_, _ = o.WriteString(lines[i])
	}
}

// GetTailLength returns the tail length for log subscription.
// math.MaxInt requests the entire available log history for the process.
func (o *AllLogsObserver) GetTailLength() int {
	return math.MaxInt
}

// GetUniqueID returns the unique identifier for this observer in the form
// "all-logs-<processName>-<uniqueID>", combining the process name with the
// random per-observer suffix generated at construction time.
func (o *AllLogsObserver) GetUniqueID() string {
	return fmt.Sprintf("all-logs-%s-%s", o.processName, o.uniqueID)
}
Loading