diff --git a/README.md b/README.md
index 278b974d7..786c008e5 100644
--- a/README.md
+++ b/README.md
@@ -255,6 +255,7 @@ The following sets of tools are available (toolsets marked with ✓ in the Defau
| core | Most common tools for Kubernetes management (Pods, Generic Resources, Events, etc.) | ✓ |
| helm | Tools for managing Helm charts and releases | ✓ |
| kcp | Manage kcp workspaces and multi-tenancy features | |
+| openshift | OpenShift-specific tools for cluster management and troubleshooting, check the [OpenShift documentation](docs/OPENSHIFT.md) for more details. | |
| kubevirt | KubeVirt virtual machine management tools | |
| observability | Cluster observability tools for querying Prometheus metrics and Alertmanager alerts | ✓ |
| ossm | Most common tools for managing OSSM, check the [OSSM documentation](https://github.com/openshift/openshift-mcp-server/blob/main/docs/OSSM.md) for more details. | |
@@ -553,6 +554,25 @@ Common use cases:
+
+
+openshift
+
+- **plan_mustgather** - Plan for collecting a must-gather archive from an OpenShift cluster, must-gather is a tool for collecting cluster data related to debugging and troubleshooting like logs, kubernetes resources, etc.
+ - `node_name` (`string`) - Optional node to run the mustgather pod. If not provided, a random control-plane node will be selected automatically
+ - `node_selector` (`string`) - Optional node label selector to use, only relevant when specifying a command and image which needs to capture data on a set of cluster nodes simultaneously
+ - `host_network` (`boolean`) - Optionally run the must-gather pods in the host network of the node. This is only relevant if a specific gather image needs to capture host-level data
+ - `gather_command` (`string`) - Optionally specify a custom gather command to run a specialized script, eg. /usr/bin/gather_audit_logs (default: /usr/bin/gather)
+ - `all_component_images` (`boolean`) - Optional when enabled, collects and runs multiple must gathers for all operators and components on the cluster that have an annotated must-gather image available
+ - `images` (`array`) - Optional list of images to use for gathering custom information about specific operators or cluster components. If not specified, OpenShift's default must-gather image will be used by default
+ - `source_dir` (`string`) - Optional to set a specific directory where the pod will copy gathered data from (default: /must-gather)
+ - `timeout` (`string`) - Timeout of the gather process eg. 30s, 6m20s, or 2h10m30s
+ - `namespace` (`string`) - Optional to specify an existing privileged namespace where must-gather pods should run. If not provided, a temporary namespace will be created
+ - `keep_resources` (`boolean`) - Optional to retain all temporary resources when the mustgather completes, otherwise temporary resources created will be advised to be cleaned up
+ - `since` (`string`) - Optional to collect logs newer than a relative duration like 5s, 2m5s, or 3h6m10s. If unspecified, all available logs will be collected
+
+
+
diff --git a/docs/OPENSHIFT.md b/docs/OPENSHIFT.md
new file mode 100644
index 000000000..67f9b9486
--- /dev/null
+++ b/docs/OPENSHIFT.md
@@ -0,0 +1,219 @@
+# OpenShift Toolset
+
+This toolset provides OpenShift-specific prompts for cluster management and troubleshooting.
+
+## Prompts
+
+### plan_mustgather
+
+Plan for collecting a must-gather archive from an OpenShift cluster. Must-gather is a tool for collecting cluster data related to debugging and troubleshooting like logs, Kubernetes resources, and more.
+
+This prompt generates YAML manifests for the must-gather resources that can be applied to the cluster.
+
+**Arguments:**
+- `node_name` (optional) - Specific node name to run must-gather pod on
+- `node_selector` (optional) - Node selector in `key=value,key2=value2` format to filter nodes for the pod
+- `source_dir` (optional) - Custom gather directory inside pod (default: `/must-gather`)
+- `namespace` (optional) - Privileged namespace to use for must-gather (auto-generated if not specified)
+- `gather_command` (optional) - Custom gather command e.g. `/usr/bin/gather_audit_logs` (default: `/usr/bin/gather`)
+- `timeout` (optional) - Timeout duration for gather command (e.g., `30m`, `1h`)
+- `since` (optional) - Only gather data newer than this duration (e.g., `5s`, `2m5s`, or `3h6m10s`), defaults to all data
+- `host_network` (optional) - Use host network for must-gather pod (`true`/`false`)
+- `keep_resources` (optional) - Keep pod resources after collection (`true`/`false`, default: `false`)
+- `all_component_images` (optional) - Include must-gather images from all installed operators (`true`/`false`)
+- `images` (optional) - Comma-separated list of custom must-gather container images
+
+**Example:**
+```
+# Basic must-gather collection
+{}
+
+# Collect with custom timeout and since
+{
+ "timeout": "30m",
+ "since": "1h"
+}
+
+# Collect from all component images
+{
+ "all_component_images": "true"
+}
+
+# Collect from specific operator image
+{
+ "images": "registry.redhat.io/openshift-logging/cluster-logging-rhel9-operator@sha256:..."
+}
+```
+
+## Enable the OpenShift Toolset
+
+### Option 1: Command Line
+
+```bash
+kubernetes-mcp-server --toolsets core,config,helm,openshift
+```
+
+### Option 2: Configuration File
+
+```toml
+toolsets = ["core", "config", "helm", "openshift"]
+```
+
+### Option 3: MCP Client Configuration
+
+```json
+{
+ "mcpServers": {
+ "kubernetes": {
+ "command": "npx",
+ "args": ["-y", "kubernetes-mcp-server@latest", "--toolsets", "core,config,helm,openshift"]
+ }
+ }
+}
+```
+
+## Prerequisites
+
+The OpenShift toolset requires:
+
+1. **OpenShift cluster** - These prompts are designed for OpenShift and automatically detect the cluster type
+2. **Proper RBAC** - The user/service account must have permissions to:
+ - Create namespaces
+ - Create service accounts
+ - Create cluster role bindings
+ - Create pods with privileged access
+ - List ClusterOperators and ClusterServiceVersions (for `all_component_images`)
+
+## How It Works
+
+### Must-Gather Plan Generation
+
+The `plan_mustgather` prompt generates YAML manifests for collecting diagnostic data from an OpenShift cluster:
+
+1. **Namespace** - A temporary namespace (e.g., `openshift-must-gather-xyz`) is created unless an existing namespace is specified
+2. **ServiceAccount** - A service account with cluster-admin permissions is created for the must-gather pod
+3. **ClusterRoleBinding** - Binds the service account to the cluster-admin role
+4. **Pod** - Runs the must-gather container(s) with the specified configuration
+
+### Component Image Discovery
+
+When `all_component_images` is enabled, the prompt discovers must-gather images from:
+- **ClusterOperators** - Looks for the `operators.openshift.io/must-gather-image` annotation
+- **ClusterServiceVersions** - Checks OLM-installed operators for the same annotation
+
+### Multiple Images Support
+
+Up to 8 gather images can be run concurrently. Each image runs in a separate container within the same pod, sharing the output volume.
+
+## Common Use Cases
+
+### Basic Cluster Diagnostics
+
+Collect general cluster diagnostics:
+```json
+{}
+```
+
+### Audit Logs Collection
+
+Collect audit logs with a custom gather command:
+```json
+{
+ "gather_command": "/usr/bin/gather_audit_logs",
+ "timeout": "2h"
+}
+```
+
+### Recent Logs Only
+
+Collect logs from the last 30 minutes:
+```json
+{
+ "since": "30m"
+}
+```
+
+### Specific Operator Diagnostics
+
+Collect diagnostics for a specific operator:
+```json
+{
+ "images": "registry.redhat.io/openshift-logging/cluster-logging-rhel9-operator@sha256:..."
+}
+```
+
+### Host Network Access
+
+For gather scripts that need host-level network access:
+```json
+{
+ "host_network": "true"
+}
+```
+
+### All Component Diagnostics
+
+Collect diagnostics from all operators with must-gather images:
+```json
+{
+ "all_component_images": "true",
+ "timeout": "1h"
+}
+```
+
+## Troubleshooting
+
+### Permission Errors
+
+If you see permission warnings, ensure your user has the required RBAC permissions:
+```bash
+oc auth can-i create namespaces
+oc auth can-i create clusterrolebindings
+oc auth can-i create pods --as=system:serviceaccount:openshift-must-gather-xxx:must-gather-collector
+```
+
+### Pod Not Starting
+
+Check if the node has enough resources and can pull the must-gather image:
+```bash
+oc get pods -n openshift-must-gather-xxx
+oc describe pod -n openshift-must-gather-xxx
+```
+
+### Timeout Issues
+
+For large clusters or audit log collection, increase the timeout:
+```json
+{
+ "timeout": "2h"
+}
+```
+
+### Image Pull Errors
+
+Ensure the must-gather image is accessible:
+```bash
+oc get secret -n openshift-config pull-secret
+```
+
+## Security Considerations
+
+### Privileged Access
+
+The must-gather pods run with:
+- `cluster-admin` ClusterRoleBinding
+- `system-cluster-critical` priority class
+- Tolerations for all taints
+- Optional host network access
+
+### Temporary Resources
+
+By default, all created resources (namespace, service account, cluster role binding) should be cleaned up after the must-gather collection is complete. Use `"keep_resources": "true"` to retain them for debugging.
+
+### Image Sources
+
+The prompt uses these default images:
+- **Must-gather**: `registry.redhat.io/openshift4/ose-must-gather:latest`
+- **Wait container**: `registry.redhat.io/ubi9/ubi-minimal`
+
+Custom images should be from trusted sources.
diff --git a/pkg/mcp/modules.go b/pkg/mcp/modules.go
index 0cbcefbdd..b0204a6d5 100644
--- a/pkg/mcp/modules.go
+++ b/pkg/mcp/modules.go
@@ -9,4 +9,5 @@ import (
_ "github.com/containers/kubernetes-mcp-server/pkg/toolsets/kubevirt"
_ "github.com/containers/kubernetes-mcp-server/pkg/toolsets/netedge"
_ "github.com/containers/kubernetes-mcp-server/pkg/toolsets/observability"
+ _ "github.com/containers/kubernetes-mcp-server/pkg/toolsets/openshift"
)
diff --git a/pkg/mcp/testdata/toolsets-openshift-prompts.json b/pkg/mcp/testdata/toolsets-openshift-prompts.json
new file mode 100644
index 000000000..0d7162dcc
--- /dev/null
+++ b/pkg/mcp/testdata/toolsets-openshift-prompts.json
@@ -0,0 +1,52 @@
+[
+ {
+ "name": "plan_mustgather",
+ "description": "Plan for collecting a must-gather archive from an OpenShift cluster. Must-gather is a tool for collecting cluster data related to debugging and troubleshooting like logs, kubernetes resources, etc.",
+ "arguments": [
+ {
+ "name": "node_name",
+ "description": "Specific node name to run must-gather pod on"
+ },
+ {
+ "name": "node_selector",
+ "description": "Node selector in key=value,key2=value2 format to filter nodes for the pod"
+ },
+ {
+ "name": "source_dir",
+ "description": "Custom gather directory inside pod (default: /must-gather)"
+ },
+ {
+ "name": "namespace",
+ "description": "Privileged namespace to use for must-gather (auto-generated if not specified)"
+ },
+ {
+ "name": "gather_command",
+ "description": "Custom gather command eg. /usr/bin/gather_audit_logs (default: /usr/bin/gather)"
+ },
+ {
+ "name": "timeout",
+ "description": "Timeout duration for gather command (eg. 30m, 1h)"
+ },
+ {
+ "name": "since",
+ "description": "Only gather data newer than this duration (eg. 5s, 2m5s, or 3h6m10s) defaults to all data."
+ },
+ {
+ "name": "host_network",
+ "description": "Use host network for must-gather pod (true/false)"
+ },
+ {
+ "name": "keep_resources",
+ "description": "Keep pod resources after collection (true/false, default: false)"
+ },
+ {
+ "name": "all_component_images",
+ "description": "Include must-gather images from all installed operators (true/false)"
+ },
+ {
+ "name": "images",
+ "description": "Comma-separated list of custom must-gather container images"
+ }
+ ]
+ }
+]
diff --git a/pkg/mcp/testdata/toolsets-openshift-tools.json b/pkg/mcp/testdata/toolsets-openshift-tools.json
new file mode 100644
index 000000000..fe51488c7
--- /dev/null
+++ b/pkg/mcp/testdata/toolsets-openshift-tools.json
@@ -0,0 +1 @@
+[]
diff --git a/pkg/mcp/toolsets_test.go b/pkg/mcp/toolsets_test.go
index a47b59996..125d48a72 100644
--- a/pkg/mcp/toolsets_test.go
+++ b/pkg/mcp/toolsets_test.go
@@ -19,6 +19,7 @@ import (
"github.com/containers/kubernetes-mcp-server/pkg/toolsets/kcp"
"github.com/containers/kubernetes-mcp-server/pkg/toolsets/kiali"
"github.com/containers/kubernetes-mcp-server/pkg/toolsets/kubevirt"
+ "github.com/containers/kubernetes-mcp-server/pkg/toolsets/openshift"
"github.com/modelcontextprotocol/go-sdk/mcp"
"github.com/stretchr/testify/suite"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
@@ -180,6 +181,42 @@ func (s *ToolsetsSuite) TestGranularToolsetsTools() {
}
}
+func (s *ToolsetsSuite) TestOpenShiftToolset() {
+ s.Run("OpenShift toolset in OpenShift cluster", func() {
+ s.Handle(test.NewInOpenShiftHandler())
+ toolsets.Clear()
+ toolsets.Register(&openshift.Toolset{})
+ s.Cfg.Toolsets = []string{"openshift"}
+ s.InitMcpClient()
+ tools, err := s.ListTools()
+ s.Run("ListTools returns tools", func() {
+ s.NotNil(tools, "Expected tools from ListTools")
+ s.NoError(err, "Expected no error from ListTools")
+ })
+ s.Run("ListTools returns correct Tool metadata", func() {
+ s.assertJsonSnapshot("toolsets-openshift-tools.json", tools.Tools)
+ })
+ })
+}
+
+func (s *ToolsetsSuite) TestOpenShiftToolsetPrompts() {
+ s.Run("OpenShift toolset prompts in OpenShift cluster", func() {
+ s.Handle(test.NewInOpenShiftHandler())
+ toolsets.Clear()
+ toolsets.Register(&openshift.Toolset{})
+ s.Cfg.Toolsets = []string{"openshift"}
+ s.InitMcpClient()
+ prompts, err := s.ListPrompts()
+ s.Run("ListPrompts returns prompts", func() {
+ s.NotNil(prompts, "Expected prompts from ListPrompts")
+ s.NoError(err, "Expected no error from ListPrompts")
+ })
+ s.Run("ListPrompts returns correct Prompt metadata", func() {
+ s.assertJsonSnapshot("toolsets-openshift-prompts.json", prompts.Prompts)
+ })
+ })
+}
+
func (s *ToolsetsSuite) TestInputSchemaEdgeCases() {
//https://github.com/containers/kubernetes-mcp-server/issues/340
s.Run("InputSchema for no-arg tool is object with empty properties", func() {
diff --git a/pkg/ocp/mustgather/helpers_test.go b/pkg/ocp/mustgather/helpers_test.go
new file mode 100644
index 000000000..18097139c
--- /dev/null
+++ b/pkg/ocp/mustgather/helpers_test.go
@@ -0,0 +1,148 @@
+package mustgather
+
+import (
+ "context"
+
+ authv1 "k8s.io/api/authorization/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/client-go/discovery"
+ "k8s.io/client-go/dynamic"
+ fakedynamic "k8s.io/client-go/dynamic/fake"
+ "k8s.io/client-go/kubernetes"
+ authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1"
+ "k8s.io/client-go/rest"
+ "k8s.io/client-go/restmapper"
+ "k8s.io/client-go/tools/clientcmd"
+ clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+ metricsv1beta1 "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1"
+)
+
+// mockSelfSubjectAccessReviews implements authorizationv1.SelfSubjectAccessReviewInterface.
+// Default behaviour returns allowed: true; use KnownAccessor to deny specific resources.
+type mockSelfSubjectAccessReviews struct {
+ authorizationv1.SelfSubjectAccessReviewInterface
+ KnownAccessor map[string]bool
+}
+
+func (m *mockSelfSubjectAccessReviews) Create(ctx context.Context, review *authv1.SelfSubjectAccessReview, opts metav1.CreateOptions) (*authv1.SelfSubjectAccessReview, error) {
+ review.Status.Allowed = true
+
+ ra := review.Spec.ResourceAttributes
+ keysToCheck := []string{
+ ra.Verb + ":" + ra.Group + ":" + ra.Resource + ":" + ra.Namespace + ":" + ra.Name,
+ ra.Verb + ":" + ra.Group + ":" + ra.Resource + ":" + ra.Namespace + ":",
+ ra.Verb + ":" + ra.Group + ":" + ra.Resource + "::" + ra.Name,
+ ra.Verb + ":" + ra.Group + ":" + ra.Resource + "::",
+ }
+
+ for _, key := range keysToCheck {
+ if allowed, ok := m.KnownAccessor[key]; ok {
+ review.Status.Allowed = allowed
+ return review, nil
+ }
+ }
+
+ return review, nil
+}
+
+// mockAuthorizationV1Client implements authorizationv1.AuthorizationV1Interface
+type mockAuthorizationV1Client struct {
+ authorizationv1.AuthorizationV1Interface
+ KnownAccessor map[string]bool
+}
+
+func (m *mockAuthorizationV1Client) SelfSubjectAccessReviews() authorizationv1.SelfSubjectAccessReviewInterface {
+ return &mockSelfSubjectAccessReviews{KnownAccessor: m.KnownAccessor}
+}
+
+// resettableRESTMapper wraps a RESTMapper and adds Reset()
+type resettableRESTMapper struct {
+ meta.RESTMapper
+}
+
+func (r *resettableRESTMapper) Reset() {}
+
+// fakeDiscoveryClient implements discovery.CachedDiscoveryInterface
+type fakeDiscoveryClient struct {
+ discovery.CachedDiscoveryInterface
+}
+
+func (f *fakeDiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) {
+ return &metav1.APIResourceList{GroupVersion: groupVersion}, nil
+}
+
+func (f *fakeDiscoveryClient) Invalidate() {}
+func (f *fakeDiscoveryClient) Fresh() bool { return true }
+
+// mockKubernetesClient implements api.KubernetesClient with minimal stubs.
+type mockKubernetesClient struct {
+ kubernetes.Interface
+ knownAccessor map[string]bool
+ dynClient dynamic.Interface
+ mapper *resettableRESTMapper
+ discClient *fakeDiscoveryClient
+}
+
+func newMockKubernetesClient(knownAccessor map[string]bool) *mockKubernetesClient {
+ scheme := runtime.NewScheme()
+ _ = corev1.AddToScheme(scheme)
+
+ return &mockKubernetesClient{
+ knownAccessor: knownAccessor,
+ dynClient: fakedynamic.NewSimpleDynamicClient(scheme),
+ mapper: &resettableRESTMapper{RESTMapper: restmapper.NewDiscoveryRESTMapper(nil)},
+ discClient: &fakeDiscoveryClient{},
+ }
+}
+
+func (m *mockKubernetesClient) NamespaceOrDefault(namespace string) string {
+ if namespace != "" {
+ return namespace
+ }
+ return "default"
+}
+
+func (m *mockKubernetesClient) RESTConfig() *rest.Config {
+ return &rest.Config{Host: "https://fake-server:6443"}
+}
+
+func (m *mockKubernetesClient) RESTMapper() meta.ResettableRESTMapper {
+ return m.mapper
+}
+
+func (m *mockKubernetesClient) DiscoveryClient() discovery.CachedDiscoveryInterface {
+ return m.discClient
+}
+
+func (m *mockKubernetesClient) DynamicClient() dynamic.Interface {
+ return m.dynClient
+}
+
+func (m *mockKubernetesClient) MetricsV1beta1Client() *metricsv1beta1.MetricsV1beta1Client {
+ return nil
+}
+
+func (m *mockKubernetesClient) AuthorizationV1() authorizationv1.AuthorizationV1Interface {
+ return &mockAuthorizationV1Client{KnownAccessor: m.knownAccessor}
+}
+
+// genericclioptions.RESTClientGetter implementation
+
+func (m *mockKubernetesClient) ToRESTConfig() (*rest.Config, error) {
+ return m.RESTConfig(), nil
+}
+
+func (m *mockKubernetesClient) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) {
+ return m.discClient, nil
+}
+
+func (m *mockKubernetesClient) ToRESTMapper() (meta.RESTMapper, error) {
+ return m.mapper, nil
+}
+
+func (m *mockKubernetesClient) ToRawKubeConfigLoader() clientcmd.ClientConfig {
+ return clientcmd.NewDefaultClientConfig(*clientcmdapi.NewConfig(), nil)
+}
diff --git a/pkg/ocp/mustgather/plan_mustgather.go b/pkg/ocp/mustgather/plan_mustgather.go
new file mode 100644
index 000000000..11895ed9d
--- /dev/null
+++ b/pkg/ocp/mustgather/plan_mustgather.go
@@ -0,0 +1,377 @@
+package mustgather
+
+import (
+ "context"
+ "fmt"
+ "path"
+ "strings"
+ "time"
+
+ "github.com/containers/kubernetes-mcp-server/pkg/api"
+ "github.com/containers/kubernetes-mcp-server/pkg/kubernetes"
+ corev1 "k8s.io/api/core/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/util/rand"
+ "sigs.k8s.io/yaml"
+)
+
+const (
+ defaultGatherSourceDir = "/must-gather/"
+ defaultMustGatherImage = "registry.redhat.io/openshift4/ose-must-gather:latest"
+ defaultGatherCmd = "/usr/bin/gather"
+ mgAnnotation = "operators.openshift.io/must-gather-image"
+ maxConcurrentGathers = 8
+)
+
+var allowedGatherCommands = []string{
+ "/usr/bin/gather",
+ "/usr/bin/gather_core_dumps",
+ "/usr/bin/gather_network_logs",
+ "/usr/bin/gather_audit_logs",
+}
+
+var allowedImageRegistries = []string{
+ "registry.redhat.io",
+ "registry.access.redhat.com",
+ "registry.connect.redhat.com",
+}
+
+// PlanMustGatherParams contains the parameters for planning a must-gather collection.
+type PlanMustGatherParams struct {
+ NodeName string
+ NodeSelector map[string]string
+ HostNetwork bool
+ SourceDir string // custom gather directory inside pod, default is "/must-gather"
+ Namespace string
+ KeepResources bool
+ GatherCommand string // custom gather command, default is "/usr/bin/gather"
+ AllImages bool // whether to use custom gather images from installed operators on cluster
+ Images []string // custom list of must-gather images
+ Timeout string
+ Since string
+}
+
+// PlanMustGather generates a must-gather plan with YAML manifests for creating the required resources.
+// It returns the plan as a string containing YAML manifests and instructions.
+func PlanMustGather(ctx context.Context, k api.KubernetesClient, params PlanMustGatherParams) (string, error) {
+ dynamicClient := k.DynamicClient()
+
+ sourceDir := params.SourceDir
+ if sourceDir == "" {
+ sourceDir = defaultGatherSourceDir
+ } else {
+ sourceDir = path.Clean(sourceDir)
+ }
+
+ namespace := params.Namespace
+ if namespace == "" {
+ namespace = fmt.Sprintf("openshift-must-gather-%s", rand.String(6))
+ }
+
+ gatherCmd := make([]string, 1)
+ gatherCmd[0] = params.GatherCommand
+ if params.GatherCommand == "" {
+ gatherCmd[0] = defaultGatherCmd
+ }
+
+ if !isAllowedGatherCommand(gatherCmd[0]) {
+ return "", fmt.Errorf("gather command %q is not allowed, must be one of allowed gather commands only", gatherCmd[0])
+ }
+
+ images := params.Images
+ for _, image := range images {
+ if !isAllowedImageRegistry(image) {
+ return "", fmt.Errorf("image %q is not from an approved Red Hat registry", image)
+ }
+ }
+
+ if params.AllImages {
+ componentImages, err := getComponentImages(ctx, dynamicClient)
+ if err != nil {
+ return "", fmt.Errorf("failed to get operator images: %v", err)
+ }
+ images = append(images, componentImages...)
+ }
+
+ if len(images) > maxConcurrentGathers {
+ return "", fmt.Errorf("more than %d gather images are not supported", maxConcurrentGathers)
+ }
+
+ timeout := params.Timeout
+ if timeout != "" {
+ _, err := time.ParseDuration(timeout)
+ if err != nil {
+ return "", fmt.Errorf("timeout duration is not valid")
+ }
+ timeoutCmd := []string{"/usr/bin/timeout", timeout}
+ gatherCmd = append(timeoutCmd, gatherCmd...)
+ }
+
+ since := params.Since
+ if since != "" {
+ _, err := time.ParseDuration(since)
+ if err != nil {
+ return "", fmt.Errorf("since duration is not valid")
+ }
+ }
+
+ envVars := []corev1.EnvVar{}
+ if since != "" {
+ envVars = append(envVars, corev1.EnvVar{
+ Name: "MUST_GATHER_SINCE",
+ Value: since,
+ })
+ }
+
+ // template container for gather,
+	// if multiple images are added, multiple containers in the same pod will be spun up
+ gatherContainerTemplate := corev1.Container{
+ Name: "gather",
+ Image: defaultMustGatherImage,
+ ImagePullPolicy: corev1.PullIfNotPresent,
+ Command: gatherCmd,
+ Env: envVars,
+ VolumeMounts: []corev1.VolumeMount{
+ {
+ Name: "must-gather-output",
+ MountPath: sourceDir,
+ },
+ },
+ }
+
+ var gatherContainers []corev1.Container
+ if len(images) > 0 {
+ gatherContainers = make([]corev1.Container, len(images))
+ for i, image := range images {
+ c := *gatherContainerTemplate.DeepCopy()
+
+ if len(images) > 1 {
+ c.Name = fmt.Sprintf("gather-%d", i+1)
+ }
+ c.Image = image
+ gatherContainers[i] = c
+ }
+ } else {
+ gatherContainers = []corev1.Container{*gatherContainerTemplate.DeepCopy()}
+ }
+
+ serviceAccountName := "must-gather-collector"
+
+ pod := &corev1.Pod{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: "v1",
+ Kind: "Pod",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ // Avoiding generateName as resources_create_or_update fails without explicit name.
+ Name: fmt.Sprintf("must-gather-%s", rand.String(6)),
+ Namespace: namespace,
+ },
+ Spec: corev1.PodSpec{
+ ServiceAccountName: serviceAccountName,
+ NodeName: params.NodeName,
+ PriorityClassName: "system-cluster-critical",
+ RestartPolicy: corev1.RestartPolicyNever,
+ Volumes: []corev1.Volume{
+ {
+ Name: "must-gather-output",
+ VolumeSource: corev1.VolumeSource{
+ EmptyDir: &corev1.EmptyDirVolumeSource{},
+ },
+ },
+ },
+ Containers: append(gatherContainers, corev1.Container{
+ Name: "wait",
+ Image: "registry.redhat.io/ubi9/ubi-minimal",
+ ImagePullPolicy: corev1.PullIfNotPresent,
+ Command: []string{"/bin/bash", "-c", "sleep infinity"},
+ VolumeMounts: []corev1.VolumeMount{
+ {
+ Name: "must-gather-output",
+ MountPath: "/must-gather",
+ },
+ },
+ }),
+ HostNetwork: params.HostNetwork,
+ NodeSelector: params.NodeSelector,
+ Tolerations: []corev1.Toleration{
+ {
+ Operator: "Exists",
+ },
+ },
+ },
+ }
+
+ namespaceExists := false
+ k8sCore := kubernetes.NewCore(k)
+ _, err := k8sCore.ResourcesGet(ctx, &schema.GroupVersionKind{
+ Group: "",
+ Version: "v1",
+ Kind: "Namespace",
+ }, "", namespace)
+ if err == nil {
+ namespaceExists = true
+ }
+
+ var namespaceObj *corev1.Namespace
+ if !namespaceExists {
+ namespaceObj = &corev1.Namespace{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: "v1",
+ Kind: "Namespace",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: namespace,
+ },
+ }
+ }
+
+ serviceAccount := &corev1.ServiceAccount{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: "v1",
+ Kind: "ServiceAccount",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: serviceAccountName,
+ Namespace: namespace,
+ },
+ }
+
+ clusterRoleBindingName := fmt.Sprintf("%s-must-gather-collector", namespace)
+ clusterRoleBinding := &rbacv1.ClusterRoleBinding{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: "rbac.authorization.k8s.io/v1",
+ Kind: "ClusterRoleBinding",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: clusterRoleBindingName,
+ },
+ RoleRef: rbacv1.RoleRef{
+ APIGroup: "rbac.authorization.k8s.io",
+ Kind: "ClusterRole",
+ Name: "cluster-admin",
+ },
+ Subjects: []rbacv1.Subject{
+ {
+ Kind: "ServiceAccount",
+ Name: serviceAccountName,
+ Namespace: namespace,
+ },
+ },
+ }
+
+ allowChecks := map[string]struct {
+ schema.GroupVersionResource
+ name string
+ verb string
+ }{
+ "create_namespace": {
+ GroupVersionResource: schema.GroupVersionResource{Version: "v1", Resource: "namespaces"},
+ verb: "create",
+ },
+ "create_serviceaccount": {
+ GroupVersionResource: schema.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"},
+ verb: "create",
+ },
+ "create_clusterrolebinding": {
+ GroupVersionResource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterrolebindings"},
+ verb: "create",
+ },
+ "create_pod": {
+ GroupVersionResource: schema.GroupVersionResource{Version: "v1", Resource: "pods"},
+ verb: "create",
+ },
+ "use_scc_hostnetwork": {
+ GroupVersionResource: schema.GroupVersionResource{Group: "security.openshift.io", Version: "v1", Resource: "securitycontextconstraints"},
+ name: "hostnetwork-v2",
+ verb: "use",
+ },
+ }
+ isAllowed := make(map[string]bool)
+
+ authClient := k.AuthorizationV1()
+ for key, check := range allowChecks {
+ allowed, _ := kubernetes.CanI(ctx, authClient, &check.GroupVersionResource, "", check.name, check.verb)
+ isAllowed[key] = allowed
+ }
+
+ var result strings.Builder
+ result.WriteString("Plan contains YAML manifests for must-gather pods and required resources (namespace, serviceaccount, clusterrolebinding). " +
+ "Suggest how the user can apply the manifest and copy results locally (`oc cp` / `kubectl cp`). \n\n",
+ )
+ result.WriteString("Ask the user if they want to apply the plan \n" +
+ "- use the resource_create_or_update tool to apply the manifest \n" +
+ "- alternatively, advise the user to execute `oc apply` / `kubectl apply` instead. \n\n",
+ )
+
+ if !params.KeepResources {
+ result.WriteString("Once the must-gather collection is completed, the user may wish to cleanup the created resources. \n" +
+ "- use the resources_delete tool to delete the namespace and the clusterrolebinding \n" +
+ "- or, execute cleanup using `kubectl delete`. \n\n")
+ }
+
+ if !namespaceExists && isAllowed["create_namespace"] {
+ namespaceYaml, err := yaml.Marshal(namespaceObj)
+ if err != nil {
+ return "", fmt.Errorf("failed to marshal namespace to yaml: %w", err)
+ }
+
+ result.WriteString("```yaml\n")
+ result.Write(namespaceYaml)
+ result.WriteString("```\n\n")
+ }
+
+ if !namespaceExists && !isAllowed["create_namespace"] {
+ result.WriteString("WARNING: The resources_create_or_update call does not have permission to create namespace(s).\n")
+ }
+
+ // yaml(s) are dumped into individual code blocks of ``` ```
+	// because the resources_create_or_update tool call fails when content has more than one resource,
+	// some models are smart enough to detect the error and retry with one resource at a time, though.
+
+ serviceAccountYaml, err := yaml.Marshal(serviceAccount)
+ if err != nil {
+ return "", fmt.Errorf("failed to marshal service account to yaml: %w", err)
+ }
+ result.WriteString("```yaml\n")
+ result.Write(serviceAccountYaml)
+ result.WriteString("```\n\n")
+
+ if !isAllowed["create_serviceaccount"] {
+ result.WriteString("WARNING: The resources_create_or_update call does not have permission to create serviceaccount(s).\n")
+ }
+
+ clusterRoleBindingYaml, err := yaml.Marshal(clusterRoleBinding)
+ if err != nil {
+ return "", fmt.Errorf("failed to marshal cluster role binding to yaml: %w", err)
+ }
+
+ result.WriteString("```yaml\n")
+ result.Write(clusterRoleBindingYaml)
+ result.WriteString("```\n\n")
+
+ if !isAllowed["create_clusterrolebinding"] {
+ result.WriteString("WARNING: The resources_create_or_update call does not have permission to create clusterrolebinding(s).\n")
+ }
+
+ podYaml, err := yaml.Marshal(pod)
+ if err != nil {
+ return "", fmt.Errorf("failed to marshal pod to yaml: %w", err)
+ }
+
+ result.WriteString("```yaml\n")
+ result.Write(podYaml)
+ result.WriteString("```\n")
+
+ if !isAllowed["create_pod"] {
+ result.WriteString("WARNING: The resources_create_or_update call does not have permission to create pod(s).\n")
+ }
+
+ if params.HostNetwork && !isAllowed["use_scc_hostnetwork"] {
+ result.WriteString("WARNING: The resources_create_or_update call does not have permission to create pod(s) with hostNetwork: true.\n")
+ }
+
+ return result.String(), nil
+}
diff --git a/pkg/ocp/mustgather/plan_mustgather_test.go b/pkg/ocp/mustgather/plan_mustgather_test.go
new file mode 100644
index 000000000..8e77cfc35
--- /dev/null
+++ b/pkg/ocp/mustgather/plan_mustgather_test.go
@@ -0,0 +1,296 @@
+package mustgather
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestPlanMustGather(t *testing.T) {
+ ctx := context.Background()
+
+ tests := []struct {
+ name string
+ params PlanMustGatherParams
+ shouldContain []string
+ shouldNotContain []string
+ wantError string
+ }{
+ {
+ name: "generates plan with default values",
+ params: PlanMustGatherParams{},
+ shouldContain: []string{
+ "apiVersion: v1",
+ "kind: Pod",
+ "kind: ServiceAccount",
+ "kind: ClusterRoleBinding",
+ "must-gather-collector",
+ "image: registry.redhat.io/openshift4/ose-must-gather:latest",
+ "mountPath: /must-gather",
+ },
+ },
+ {
+ name: "generates plan with custom namespace",
+ params: PlanMustGatherParams{Namespace: "custom-must-gather-ns"},
+ shouldContain: []string{"namespace: custom-must-gather-ns"},
+ },
+ {
+ name: "generates plan with node name",
+ params: PlanMustGatherParams{NodeName: "worker-node-1"},
+ shouldContain: []string{"nodeName: worker-node-1"},
+ },
+ {
+ name: "generates plan with host network enabled",
+ params: PlanMustGatherParams{HostNetwork: true},
+ shouldContain: []string{"hostNetwork: true"},
+ },
+ {
+ name: "generates plan with custom source dir",
+ params: PlanMustGatherParams{SourceDir: "/custom/gather/path"},
+ shouldContain: []string{"mountPath: /custom/gather/path"},
+ },
+ {
+ name: "generates plan with multiple custom images",
+ params: PlanMustGatherParams{
+ Images: []string{"registry.redhat.io/custom/must-gather-1:v1", "registry.redhat.io/custom/must-gather-2:v2"},
+ },
+ shouldContain: []string{
+ "image: registry.redhat.io/custom/must-gather-1:v1",
+ "image: registry.redhat.io/custom/must-gather-2:v2",
+ "name: gather-1",
+ "name: gather-2",
+ },
+ },
+ {
+ name: "returns error when more than eight images",
+ params: PlanMustGatherParams{
+ Images: []string{
+ "registry.redhat.io/image/1", "registry.redhat.io/image/2", "registry.redhat.io/image/3", "registry.redhat.io/image/4",
+ "registry.redhat.io/image/5", "registry.redhat.io/image/6", "registry.redhat.io/image/7", "registry.redhat.io/image/8",
+ "registry.redhat.io/image/9",
+ },
+ },
+ wantError: "more than 8 gather images are not supported",
+ },
+ {
+ name: "returns error when using an unapproved registry",
+ params: PlanMustGatherParams{
+ Images: []string{
+ "docker.io/image/1",
+ },
+ },
+ wantError: "not from an approved Red Hat registry",
+ },
+ {
+ name: "generates plan with valid timeout",
+ params: PlanMustGatherParams{Timeout: "30m"},
+ shouldContain: []string{"/usr/bin/timeout", "30m", "/usr/bin/gather"},
+ },
+ {
+ name: "returns error for invalid timeout format",
+ params: PlanMustGatherParams{Timeout: "invalid-duration"},
+ wantError: "timeout duration is not valid",
+ },
+ {
+ name: "generates plan with valid since duration",
+ params: PlanMustGatherParams{Since: "1h"},
+ shouldContain: []string{"name: MUST_GATHER_SINCE", "value: 1h"},
+ },
+ {
+ name: "returns error for invalid since format",
+ params: PlanMustGatherParams{Since: "not-a-duration"},
+ wantError: "since duration is not valid",
+ },
+ {
+ name: "generates plan with custom gather command",
+ params: PlanMustGatherParams{GatherCommand: "/usr/bin/gather_audit_logs"},
+ shouldContain: []string{"/usr/bin/gather_audit_logs"},
+ },
+ {
+ name: "returns error when using an unknown gather command",
+ params: PlanMustGatherParams{GatherCommand: "/bin/the-custom-script"},
+ wantError: "must be one of allowed gather commands only",
+ },
+ {
+ name: "generates plan with node selector",
+ params: PlanMustGatherParams{NodeSelector: map[string]string{"node-role.kubernetes.io/worker": ""}},
+ shouldContain: []string{"nodeSelector:", "node-role.kubernetes.io/worker"},
+ },
+ {
+ name: "generates plan with cleanup instructions when keep_resources is false",
+ params: PlanMustGatherParams{KeepResources: false},
+ shouldContain: []string{"cleanup the created resources"},
+ },
+ {
+ name: "generates plan without cleanup instructions when keep_resources is true",
+ params: PlanMustGatherParams{KeepResources: true},
+ shouldNotContain: []string{"cleanup the created resources"},
+ },
+ {
+ name: "cleans source dir path",
+ params: PlanMustGatherParams{SourceDir: "/custom/path/../gather/./dir"},
+ shouldContain: []string{"mountPath: /custom/gather/dir"},
+ },
+ {
+ name: "generates plan with timeout and gather command combined",
+ params: PlanMustGatherParams{Timeout: "15m", GatherCommand: "/usr/bin/gather_network_logs"},
+ shouldContain: []string{"/usr/bin/timeout", "15m", "/usr/bin/gather_network_logs"},
+ },
+ {
+ name: "generates plan with all parameters combined",
+ params: PlanMustGatherParams{
+ Namespace: "test-ns",
+ NodeName: "node-1",
+ HostNetwork: true,
+ SourceDir: "/gather-output",
+ Since: "2h",
+ Timeout: "45m",
+ Images: []string{"registry.redhat.io/test/gather:v1"},
+ NodeSelector: map[string]string{"kubernetes.io/os": "linux"},
+ },
+ shouldContain: []string{
+ "namespace: test-ns",
+ "nodeName: node-1",
+ "hostNetwork: true",
+ "mountPath: /gather-output",
+ "value: 2h",
+ "/usr/bin/timeout",
+ "45m",
+ "image: registry.redhat.io/test/gather:v1",
+ "kubernetes.io/os",
+ },
+ },
+ {
+ name: "handles empty string timeout",
+ params: PlanMustGatherParams{Timeout: ""},
+ shouldContain: []string{"/usr/bin/gather"},
+ shouldNotContain: []string{"/usr/bin/timeout"},
+ },
+ {
+ name: "handles empty string since",
+ params: PlanMustGatherParams{Since: ""},
+ shouldNotContain: []string{"MUST_GATHER_SINCE"},
+ },
+ {
+ name: "handles empty images slice",
+ params: PlanMustGatherParams{Images: []string{}},
+ shouldContain: []string{"image: registry.redhat.io/openshift4/ose-must-gather:latest"},
+ },
+ {
+ name: "handles nil node selector",
+ params: PlanMustGatherParams{NodeSelector: nil},
+ },
+ {
+ name: "handles empty node selector map",
+ params: PlanMustGatherParams{NodeSelector: map[string]string{}},
+ },
+ {
+ name: "includes wait container in pod spec",
+ params: PlanMustGatherParams{},
+ shouldContain: []string{"name: wait", "sleep infinity"},
+ },
+ {
+ name: "includes tolerations for all taints",
+ params: PlanMustGatherParams{},
+ shouldContain: []string{"tolerations:", "operator: Exists"},
+ },
+ {
+ name: "includes priority class",
+ params: PlanMustGatherParams{},
+ shouldContain: []string{"priorityClassName: system-cluster-critical"},
+ },
+ {
+ name: "includes restart policy never",
+ params: PlanMustGatherParams{},
+ shouldContain: []string{"restartPolicy: Never"},
+ },
+ {
+ name: "includes cluster-admin role binding",
+ params: PlanMustGatherParams{},
+ shouldContain: []string{"name: cluster-admin", "kind: ClusterRole"},
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ client := newMockKubernetesClient(nil)
+
+ result, err := PlanMustGather(ctx, client, tt.params)
+
+ if tt.wantError != "" {
+ require.Error(t, err)
+ require.Contains(t, err.Error(), tt.wantError)
+ require.Empty(t, result)
+ return
+ }
+
+ require.NoError(t, err)
+ require.NotEmpty(t, result)
+
+ for _, want := range tt.shouldContain {
+ require.Contains(t, result, want)
+ }
+ for _, notWant := range tt.shouldNotContain {
+ require.NotContains(t, result, notWant)
+ }
+ })
+ }
+
+ sarcTests := []struct {
+ name string
+ knownAccessor map[string]bool
+ params PlanMustGatherParams
+ shouldContain string
+ }{
+ {
+ name: "includes warning when no namespace create permission",
+ knownAccessor: map[string]bool{
+ "create::namespaces::": false,
+ },
+ shouldContain: "WARNING: The resources_create_or_update call does not have permission to create namespace(s)",
+ },
+ {
+ name: "includes warning when no serviceaccount create permission",
+ knownAccessor: map[string]bool{
+ "create::serviceaccounts::": false,
+ },
+ shouldContain: "WARNING: The resources_create_or_update call does not have permission to create serviceaccount(s)",
+ },
+ {
+ name: "includes warning when no clusterrolebinding create permission",
+ knownAccessor: map[string]bool{
+ "create:rbac.authorization.k8s.io:clusterrolebindings::": false,
+ },
+ shouldContain: "WARNING: The resources_create_or_update call does not have permission to create clusterrolebinding(s)",
+ },
+ {
+ name: "includes warning when no pod create permission",
+ knownAccessor: map[string]bool{
+ "create::pods::": false,
+ },
+ shouldContain: "WARNING: The resources_create_or_update call does not have permission to create pod(s)",
+ },
+ {
+ name: "includes warning when hostNetwork enabled without SCC permission",
+ knownAccessor: map[string]bool{
+ "use:security.openshift.io:securitycontextconstraints::": false,
+ },
+ shouldContain: "WARNING: The resources_create_or_update call does not have permission to create pod(s) with hostNetwork: true",
+ params: PlanMustGatherParams{
+ HostNetwork: true,
+ },
+ },
+ }
+
+ for _, tt := range sarcTests {
+ t.Run(tt.name, func(t *testing.T) {
+ client := newMockKubernetesClient(tt.knownAccessor)
+
+ result, err := PlanMustGather(ctx, client, tt.params)
+
+ require.NoError(t, err)
+ require.NotEmpty(t, result)
+ require.Contains(t, result, tt.shouldContain)
+ })
+ }
+}
diff --git a/pkg/ocp/mustgather/utils.go b/pkg/ocp/mustgather/utils.go
new file mode 100644
index 000000000..3a7a9c61e
--- /dev/null
+++ b/pkg/ocp/mustgather/utils.go
@@ -0,0 +1,98 @@
+package mustgather
+
+import (
+ "context"
+ "strings"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/dynamic"
+)
+
+func getComponentImages(ctx context.Context, dynamicClient dynamic.Interface) ([]string, error) {
+ var images []string
+
+ appendImageFromAnnotation := func(obj runtime.Object) error {
+ unstruct, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
+ if err != nil {
+ return err
+ }
+
+ u := unstructured.Unstructured{Object: unstruct}
+ annotations := u.GetAnnotations()
+ if annotations[mgAnnotation] != "" {
+ images = append(images, annotations[mgAnnotation])
+ }
+
+ return nil
+ }
+
+ // List ClusterOperators
+ clusterOperatorGVR := schema.GroupVersionResource{
+ Group: "config.openshift.io",
+ Version: "v1",
+ Resource: "clusteroperators",
+ }
+ clusterOperatorsList, err := dynamicClient.Resource(clusterOperatorGVR).List(ctx, metav1.ListOptions{})
+ if err != nil {
+ return nil, err
+ }
+
+ if err := clusterOperatorsList.EachListItem(appendImageFromAnnotation); err != nil {
+ return images, err
+ }
+
+ // List ClusterServiceVersions
+ csvGVR := schema.GroupVersionResource{
+ Group: "operators.coreos.com",
+ Version: "v1alpha1",
+ Resource: "clusterserviceversions",
+ }
+ csvList, err := dynamicClient.Resource(csvGVR).List(ctx, metav1.ListOptions{})
+ if err != nil {
+ return images, err
+ }
+
+ err = csvList.EachListItem(appendImageFromAnnotation)
+ return images, err
+}
+
+// isAllowedImageRegistry checks if the image reference starts with one of the allowed Red Hat registries.
+func isAllowedImageRegistry(image string) bool {
+ for _, registry := range allowedImageRegistries {
+ if strings.HasPrefix(image, registry+"/") {
+ return true
+ }
+ }
+ return false
+}
+
+ // isAllowedGatherCommand checks whether the gather command specified by the user is allowed.
+func isAllowedGatherCommand(command string) bool {
+ for _, knownCmd := range allowedGatherCommands {
+ if command == knownCmd {
+ return true
+ }
+ }
+
+ return false
+}
+
+// ParseNodeSelector parses a comma-separated key=value selector string into a map.
+func ParseNodeSelector(selector string) map[string]string {
+ if selector == "" {
+ return nil
+ }
+
+ result := make(map[string]string)
+ pairs := strings.Split(selector, ",")
+ for _, pair := range pairs {
+ kv := strings.SplitN(strings.TrimSpace(pair), "=", 2)
+ if len(kv) == 2 && strings.TrimSpace(kv[0]) != "" {
+ result[strings.TrimSpace(kv[0])] = strings.TrimSpace(kv[1])
+ }
+ }
+ return result
+}
diff --git a/pkg/toolsets/openshift/mustgather/mustgather.go b/pkg/toolsets/openshift/mustgather/mustgather.go
new file mode 100644
index 000000000..7a721a2fa
--- /dev/null
+++ b/pkg/toolsets/openshift/mustgather/mustgather.go
@@ -0,0 +1,156 @@
+package mustgather
+
+import (
+ "strings"
+
+ "github.com/containers/kubernetes-mcp-server/pkg/api"
+ "github.com/containers/kubernetes-mcp-server/pkg/ocp/mustgather"
+)
+
+// Prompts returns the ServerPrompt definitions for must-gather operations.
+func Prompts() []api.ServerPrompt {
+ return []api.ServerPrompt{{
+ Prompt: api.Prompt{
+ Name: "plan_mustgather",
+ Title: "Plan a must-gather collection",
+ Description: "Plan for collecting a must-gather archive from an OpenShift cluster. Must-gather is a tool for collecting cluster data related to debugging and troubleshooting like logs, kubernetes resources, etc.",
+ Arguments: []api.PromptArgument{
+ {
+ Name: "node_name",
+ Description: "Specific node name to run must-gather pod on",
+ Required: false,
+ },
+ {
+ Name: "node_selector",
+ Description: "Node selector in key=value,key2=value2 format to filter nodes for the pod",
+ Required: false,
+ },
+ {
+ Name: "source_dir",
+ Description: "Custom gather directory inside pod (default: /must-gather)",
+ Required: false,
+ },
+ {
+ Name: "namespace",
+ Description: "Privileged namespace to use for must-gather (auto-generated if not specified)",
+ Required: false,
+ },
+ {
+ Name: "gather_command",
+ Description: "Custom gather command eg. /usr/bin/gather_audit_logs (default: /usr/bin/gather)",
+ Required: false,
+ },
+ {
+ Name: "timeout",
+ Description: "Timeout duration for gather command (eg. 30m, 1h)",
+ Required: false,
+ },
+ {
+ Name: "since",
+ Description: "Only gather data newer than this duration (eg. 5s, 2m5s, or 3h6m10s) defaults to all data.",
+ Required: false,
+ },
+ {
+ Name: "host_network",
+ Description: "Use host network for must-gather pod (true/false)",
+ Required: false,
+ },
+ {
+ Name: "keep_resources",
+ Description: "Keep pod resources after collection (true/false, default: false)",
+ Required: false,
+ },
+ {
+ Name: "all_component_images",
+ Description: "Include must-gather images from all installed operators (true/false)",
+ Required: false,
+ },
+ {
+ Name: "images",
+ Description: "Comma-separated list of custom must-gather container images",
+ Required: false,
+ },
+ },
+ },
+ Handler: planMustGatherHandler,
+ }}
+}
+
+// planMustGatherHandler is the handler that parses arguments and calls the core
+// PlanMustGather function.
+func planMustGatherHandler(params api.PromptHandlerParams) (*api.PromptCallResult, error) {
+ args := params.GetArguments()
+
+ mgParams := mustgather.PlanMustGatherParams{
+ NodeName: args["node_name"],
+ NodeSelector: mustgather.ParseNodeSelector(args["node_selector"]),
+ SourceDir: args["source_dir"],
+ Namespace: args["namespace"],
+ GatherCommand: args["gather_command"],
+ Timeout: args["timeout"],
+ Since: args["since"],
+ HostNetwork: parseBool(args["host_network"]),
+ KeepResources: parseBool(args["keep_resources"]),
+ AllImages: parseBool(args["all_component_images"]),
+ Images: parseImages(args["images"]),
+ }
+
+ // params embeds api.KubernetesClient
+ result, err := mustgather.PlanMustGather(params.Context, params, mgParams)
+ if err != nil {
+ return nil, err
+ }
+
+ return api.NewPromptCallResult(
+ "Must-gather plan generated successfully",
+ []api.PromptMessage{
+ {
+ Role: "user",
+ Content: api.PromptContent{
+ Type: "text",
+ Text: formatMustGatherPrompt(result),
+ },
+ },
+ {
+ Role: "assistant",
+ Content: api.PromptContent{
+ Type: "text",
+ Text: "I'll help you apply this must-gather plan to collect diagnostic data from your OpenShift cluster.",
+ },
+ },
+ },
+ nil,
+ ), nil
+}
+
+// parseBool parses a string value to boolean, returns false for empty or invalid values.
+func parseBool(value string) bool {
+ return strings.ToLower(strings.TrimSpace(value)) == "true"
+}
+
+// parseImages parses a comma-separated list of images into a slice.
+func parseImages(value string) []string {
+ if value == "" {
+ return nil
+ }
+ var images []string
+ for _, img := range strings.Split(value, ",") {
+ img = strings.TrimSpace(img)
+ if img != "" {
+ images = append(images, img)
+ }
+ }
+ return images
+}
+
+// formatMustGatherPrompt formats the must-gather plan result into a prompt for the LLM.
+func formatMustGatherPrompt(planResult string) string {
+ var sb strings.Builder
+
+ sb.WriteString("# Must-Gather Collection Plan\n\n")
+ sb.WriteString(planResult)
+ sb.WriteString("\n---\n\n")
+ sb.WriteString("**Please review the plan above and confirm if you want to proceed with applying these resources.**\n")
+
+ return sb.String()
+}
diff --git a/pkg/toolsets/openshift/toolset.go b/pkg/toolsets/openshift/toolset.go
new file mode 100644
index 000000000..a3eb145b5
--- /dev/null
+++ b/pkg/toolsets/openshift/toolset.go
@@ -0,0 +1,35 @@
+package openshift
+
+import (
+ "slices"
+
+ "github.com/containers/kubernetes-mcp-server/pkg/api"
+ "github.com/containers/kubernetes-mcp-server/pkg/toolsets"
+ "github.com/containers/kubernetes-mcp-server/pkg/toolsets/openshift/mustgather"
+)
+
+type Toolset struct{}
+
+var _ api.Toolset = (*Toolset)(nil)
+
+func (t *Toolset) GetName() string {
+ return "openshift"
+}
+
+func (t *Toolset) GetDescription() string {
+ return "OpenShift-specific tools for cluster management and troubleshooting"
+}
+
+func (t *Toolset) GetTools(o api.Openshift) []api.ServerTool {
+ return nil
+}
+
+func (t *Toolset) GetPrompts() []api.ServerPrompt {
+ return slices.Concat(
+ mustgather.Prompts(),
+ )
+}
+
+func init() {
+ toolsets.Register(&Toolset{})
+}