Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
31 changes: 31 additions & 0 deletions internal/operator-controller/action/action_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
package action

import (
"log"
"os"
"testing"
"time"

utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/rest"

"github.com/operator-framework/operator-controller/test"
)

var cfg *rest.Config

func TestMain(m *testing.M) {
testEnv := test.NewEnv()

var err error
cfg, err = testEnv.Start()
utilruntime.Must(err)
if cfg == nil {
log.Panic("expected cfg to not be nil")
}

code := m.Run()
stopErr := test.StopWithRetry(testEnv, time.Minute, time.Second)
utilruntime.Must(stopErr)
os.Exit(code)
Comment thread
joelanford marked this conversation as resolved.
}
27 changes: 21 additions & 6 deletions internal/operator-controller/action/storagedriver.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,24 @@ import (
"github.com/operator-framework/helm-operator-plugins/pkg/storage"
)

// chunkedSecretsConfig is the chunked storage configuration applied to Helm
// release secrets.
//
// Kubernetes caps a Secret's total data at 1MB (1,048,576 bytes). The index
// Secret carries two data keys: "chunk" (at most ChunkSize bytes) and
// "extraChunks" (a JSON list of the extra chunk Secret names). ChunkSize
// therefore sits 8KB below the 1MB cap, leaving headroom for extraChunks,
// whose worst case is ~2.6KB (10 names at the 253-character DNS subdomain
// maximum).
//
// MaxWriteChunks is 11 so that total capacity (ChunkSize * MaxWriteChunks)
// exceeds the previous configuration's theoretical maximum (1MB * 10 = 10MB).
// Tests in storagedriver_test.go pin these values; they must never decrease,
// since that would make previously-storable releases fail.
var chunkedSecretsConfig = storage.ChunkedSecretsConfig{
	ChunkSize:      1016 * 1024, // (1024 - 8) KB: 8KB under the 1MB Secret data limit
	MaxReadChunks:  11,
	MaxWriteChunks: 11,
}

func ChunkedStorageDriverMapper(secretsGetter clientcorev1.SecretsGetter, reader client.Reader, namespace string) helmclient.ObjectToStorageDriverMapper {
secretsClient := newSecretsDelegatingClient(secretsGetter, reader, namespace)
return func(ctx context.Context, object client.Object, config *rest.Config) (driver.Driver, error) {
Expand All @@ -27,12 +45,9 @@ func ChunkedStorageDriverMapper(secretsGetter clientcorev1.SecretsGetter, reader
ownerRefSecretClient := helmclient.NewOwnerRefSecretClient(secretsClient, ownerRefs, func(secret *corev1.Secret) bool {
return secret.Type == storage.SecretTypeChunkedIndex
})
return storage.NewChunkedSecrets(ownerRefSecretClient, "operator-controller", storage.ChunkedSecretsConfig{
ChunkSize: 1024 * 1024,
MaxReadChunks: 10,
MaxWriteChunks: 10,
Log: func(format string, args ...interface{}) { log.Info(fmt.Sprintf(format, args...)) },
}), nil
cfg := chunkedSecretsConfig
cfg.Log = func(format string, args ...interface{}) { log.Info(fmt.Sprintf(format, args...)) }
return storage.NewChunkedSecrets(ownerRefSecretClient, "operator-controller", cfg), nil
}
}

Expand Down
143 changes: 143 additions & 0 deletions internal/operator-controller/action/storagedriver_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,143 @@
package action

import (
"context"
"crypto/sha256"
"encoding/json"
"fmt"
"testing"
Comment thread
joelanford marked this conversation as resolved.

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"helm.sh/helm/v3/pkg/release"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"

"github.com/operator-framework/helm-operator-plugins/pkg/storage"
)

const (
	// maxCompressedReleaseSize is the largest gzip-compressed Helm release,
	// in bytes, that the chunked storage driver can store
	// (ChunkSize * MaxWriteChunks). An exact-equality test below forces this
	// constant to be raised whenever the config grows. It must never
	// decrease: lowering it would cause previously-storable releases to
	// fail.
	maxCompressedReleaseSize = 1016 * 1024 * 11 // 1016 KB per chunk * 11 chunks = 11,176 KB

	// minMaxWriteChunks is pinned to MaxWriteChunks by an exact-equality
	// test so any config increase forces an update here. It must never
	// decrease: lowering it would cause previously-storable releases to
	// fail.
	minMaxWriteChunks = 11

	// minMaxReadChunks is pinned to MaxReadChunks by an exact-equality test
	// so any config increase forces an update here. It must never decrease:
	// lowering it would make previously-stored releases unreadable.
	minMaxReadChunks = 11
)

func TestChunkedSecretsConfigTotalCapacity(t *testing.T) {
assert.Equal(t, maxCompressedReleaseSize, chunkedSecretsConfig.ChunkSize*chunkedSecretsConfig.MaxWriteChunks,
"ChunkSize * MaxWriteChunks must equal maxCompressedReleaseSize")
}

func TestChunkedSecretsConfigMaxWriteChunks(t *testing.T) {
assert.Equal(t, minMaxWriteChunks, chunkedSecretsConfig.MaxWriteChunks,
"MaxWriteChunks changed — update minMaxWriteChunks to match (it must never decrease)")
}

func TestChunkedSecretsConfigMaxReadChunks(t *testing.T) {
assert.Equal(t, minMaxReadChunks, chunkedSecretsConfig.MaxReadChunks,
"MaxReadChunks changed — update minMaxReadChunks to match (it must never decrease)")
Comment thread
joelanford marked this conversation as resolved.
assert.GreaterOrEqual(t, chunkedSecretsConfig.MaxReadChunks, chunkedSecretsConfig.MaxWriteChunks,
"MaxReadChunks must be >= MaxWriteChunks so any written release can be read back")
}

func TestChunkedSecretsMaxCapacityRelease(t *testing.T) {
// Regression test: stores a release through the real chunked storage driver
// that fills all MaxWriteChunks chunks, reads it back, and verifies the
// first MaxWriteChunks-1 chunks are exactly ChunkSize bytes. This proves
// the configured ChunkSize fits within the Kubernetes 1MB Secret data limit
// end-to-end against a real API server.

chunkSize := chunkedSecretsConfig.ChunkSize
maxChunks := chunkedSecretsConfig.MaxWriteChunks

// Use a large incompressible payload to guarantee all chunks are used.
// Raw []byte in Config is base64-encoded by json.Marshal, giving full
// 8-bit entropy. The base64 expansion (~33%) and gzip compression roughly
// cancel out at a ~1.004 ratio, so ChunkSize*10 raw bytes compresses to
// just over 10 chunks, requiring all 11.
rel := &release.Release{
Name: "max-capacity-test",
Version: 1,
Config: map[string]any{"payload": deterministicPayload(chunkSize * (maxChunks - 1))},
Info: &release.Info{Status: release.StatusDeployed},
}

secretsClient := clientcorev1.NewForConfigOrDie(cfg).Secrets("default")
drv := storage.NewChunkedSecrets(secretsClient, "test-owner", chunkedSecretsConfig)

key := fmt.Sprintf("sh.helm.release.v1.%s.v%d", rel.Name, rel.Version)
require.NoError(t, drv.Create(key, rel))
Comment thread
joelanford marked this conversation as resolved.

Comment thread
joelanford marked this conversation as resolved.
// Verify round-trip
actual, err := drv.Get(key)
require.NoError(t, err)
assert.Equal(t, rel.Name, actual.Name)
assert.Equal(t, rel.Version, actual.Version)

// Collect secrets and verify chunk sizes using the extraChunks field
// from the index Secret to determine ordering.
allSecrets, err := secretsClient.List(context.Background(), metav1.ListOptions{})
require.NoError(t, err)
Comment thread
joelanford marked this conversation as resolved.

secretsByName := map[string][]byte{}
var indexChunkData []byte
var extraChunkNames []string
for _, s := range allSecrets.Items {
switch s.Type {
case storage.SecretTypeChunkedIndex:
indexChunkData = s.Data["chunk"]
require.NoError(t, json.Unmarshal(s.Data["extraChunks"], &extraChunkNames))
case storage.SecretTypeChunkedChunk:
secretsByName[s.Name] = s.Data["chunk"]
}
}
Comment thread
joelanford marked this conversation as resolved.

require.NotNil(t, indexChunkData, "index Secret must exist")
require.Lenf(t, extraChunkNames, maxChunks-1,
"release must use all %d chunks", maxChunks)

// The first 10 chunks (index + 9 extras) must be exactly ChunkSize.
// The last chunk may be smaller since it holds the remainder.
assert.Lenf(t, indexChunkData, chunkSize,
"chunk 1/%d (index) must be exactly ChunkSize", maxChunks)
for i, name := range extraChunkNames[:maxChunks-2] {
chunkData, ok := secretsByName[name]
require.True(t, ok, "chunk Secret %q not found", name)
assert.Lenf(t, chunkData, chunkSize,
"chunk %d/%d must be exactly ChunkSize", i+2, maxChunks)
}

// The last chunk just needs to be non-empty.
lastName := extraChunkNames[maxChunks-2]
lastChunk, ok := secretsByName[lastName]
require.True(t, ok, "last chunk Secret %q not found", lastName)
assert.NotEmpty(t, lastChunk, "last chunk must be non-empty")

_, err = drv.Delete(key)
require.NoError(t, err)
}

// deterministicPayload returns n bytes of reproducible, incompressible data
// produced by a sha256 hash chain seeded from a fixed string: each 32-byte
// link is appended to the output and then hashed to form the next link.
func deterministicPayload(n int) []byte {
	out := make([]byte, 0, n)
	for digest := sha256.Sum256([]byte("deterministicPayload")); len(out) < n; digest = sha256.Sum256(digest[:]) {
		out = append(out, digest[:]...)
	}
	return out[:n]
}
Loading