Skip to content

Commit 3a30af1

Browse files
committed
fix: reduce chunked storage ChunkSize to fit within Kubernetes Secret limit
OCPBUGS-85508 The chunked Helm release storage driver set ChunkSize to exactly 1MB (1,048,576 bytes), which is the Kubernetes Secret data limit. The index Secret stores both the first chunk and a JSON list of extra chunk names (extraChunks), so any release requiring chunking exceeded the limit: Secret "sh.helm.release.v1.odf-operator.v1" is invalid: data: Too long: may not be more than 1048576 bytes Reduce ChunkSize to (1024-8)*1024 (1,040,384 bytes), leaving 8KB of headroom for the extraChunks field, and increase MaxReadChunks and MaxWriteChunks from 10 to 11 to maintain total capacity above the previous theoretical maximum.
1 parent 869124a commit 3a30af1

3 files changed

Lines changed: 176 additions & 6 deletions

File tree

Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,31 @@
1+
package action
2+
3+
import (
4+
"log"
5+
"os"
6+
"testing"
7+
"time"
8+
9+
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
10+
"k8s.io/client-go/rest"
11+
12+
"github.com/operator-framework/operator-controller/test"
13+
)
14+
15+
var cfg *rest.Config
16+
17+
func TestMain(m *testing.M) {
18+
testEnv := test.NewEnv()
19+
20+
var err error
21+
cfg, err = testEnv.Start()
22+
utilruntime.Must(err)
23+
if cfg == nil {
24+
log.Panic("expected cfg to not be nil")
25+
}
26+
27+
code := m.Run()
28+
stopErr := test.StopWithRetry(testEnv, time.Minute, time.Second)
29+
utilruntime.Must(stopErr)
30+
os.Exit(code)
31+
}

internal/operator-controller/action/storagedriver.go

Lines changed: 12 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,15 @@ import (
1919
"github.com/operator-framework/helm-operator-plugins/pkg/storage"
2020
)
2121

22+
// chunkedSecretsConfig defines the chunked storage configuration for Helm release secrets.
23+
// ChunkSize is set below the 1MB Kubernetes Secret data limit to leave room for the
24+
// extraChunks index field stored alongside the first chunk in the index Secret.
25+
var chunkedSecretsConfig = storage.ChunkedSecretsConfig{
26+
ChunkSize: (1024 - 8) * 1024,
27+
MaxReadChunks: 11,
28+
MaxWriteChunks: 11,
29+
}
30+
2231
func ChunkedStorageDriverMapper(secretsGetter clientcorev1.SecretsGetter, reader client.Reader, namespace string) helmclient.ObjectToStorageDriverMapper {
2332
secretsClient := newSecretsDelegatingClient(secretsGetter, reader, namespace)
2433
return func(ctx context.Context, object client.Object, config *rest.Config) (driver.Driver, error) {
@@ -27,12 +36,9 @@ func ChunkedStorageDriverMapper(secretsGetter clientcorev1.SecretsGetter, reader
2736
ownerRefSecretClient := helmclient.NewOwnerRefSecretClient(secretsClient, ownerRefs, func(secret *corev1.Secret) bool {
2837
return secret.Type == storage.SecretTypeChunkedIndex
2938
})
30-
return storage.NewChunkedSecrets(ownerRefSecretClient, "operator-controller", storage.ChunkedSecretsConfig{
31-
ChunkSize: 1024 * 1024,
32-
MaxReadChunks: 10,
33-
MaxWriteChunks: 10,
34-
Log: func(format string, args ...interface{}) { log.Info(fmt.Sprintf(format, args...)) },
35-
}), nil
39+
cfg := chunkedSecretsConfig
40+
cfg.Log = func(format string, args ...interface{}) { log.Info(fmt.Sprintf(format, args...)) }
41+
return storage.NewChunkedSecrets(ownerRefSecretClient, "operator-controller", cfg), nil
3642
}
3743
}
3844

Lines changed: 133 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,133 @@
1+
package action
2+
3+
import (
4+
"context"
5+
"encoding/json"
6+
"fmt"
7+
"math/rand"
8+
"testing"
9+
10+
"github.com/stretchr/testify/assert"
11+
"github.com/stretchr/testify/require"
12+
"helm.sh/helm/v3/pkg/release"
13+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
14+
clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
15+
16+
"github.com/operator-framework/helm-operator-plugins/pkg/storage"
17+
)
18+
19+
// maxCompressedReleaseSize is the maximum total size in bytes of a
// gzip-compressed Helm release that the chunked storage driver can store.
// The release is split across multiple Kubernetes Secrets, each holding at
// most ChunkSize bytes of compressed data, so this equals
// ChunkSize * MaxWriteChunks. It is a high water mark: lowering it would
// silently reject releases that previously fit, so it must never decrease.
const maxCompressedReleaseSize = 11 * (1024 - 8) * 1024 // 11 chunks * 1016 KB = 11,176 KB

// High water marks for the chunk limits. Decreasing either value would make
// previously-storable releases unwritable (write) or unreadable (read), so
// neither may ever go down.
const (
	minMaxWriteChunks = 11
	minMaxReadChunks  = 11
)
33+
34+
func TestChunkedSecretsConfigTotalCapacity(t *testing.T) {
35+
assert.Equal(t, maxCompressedReleaseSize, chunkedSecretsConfig.ChunkSize*chunkedSecretsConfig.MaxWriteChunks,
36+
"ChunkSize * MaxWriteChunks must equal maxCompressedReleaseSize")
37+
}
38+
39+
func TestChunkedSecretsConfigMaxWriteChunks(t *testing.T) {
40+
assert.Equal(t, minMaxWriteChunks, chunkedSecretsConfig.MaxWriteChunks,
41+
"MaxWriteChunks changed — update minMaxWriteChunks to match (it must never decrease)")
42+
}
43+
44+
func TestChunkedSecretsConfigMaxReadChunks(t *testing.T) {
45+
assert.Equal(t, minMaxReadChunks, chunkedSecretsConfig.MaxReadChunks,
46+
"MaxReadChunks changed — update minMaxReadChunks to match (it must never decrease)")
47+
assert.GreaterOrEqual(t, chunkedSecretsConfig.MaxReadChunks, chunkedSecretsConfig.MaxWriteChunks,
48+
"MaxReadChunks must be >= MaxWriteChunks so any written release can be read back")
49+
}
50+
51+
// TestChunkedSecretsMaxCapacityRelease is an integration test: it requires
// the live API server started in TestMain (via the package-level cfg).
func TestChunkedSecretsMaxCapacityRelease(t *testing.T) {
	// This test stores a release through the real chunked storage driver that
	// uses all MaxWriteChunks chunks, then reads it back and verifies each
	// chunk is at ChunkSize. This proves the configured ChunkSize fits within
	// the Kubernetes 1MB Secret data limit end-to-end against a real API server.

	chunkSize := chunkedSecretsConfig.ChunkSize
	maxChunks := chunkedSecretsConfig.MaxWriteChunks

	// Use a large incompressible payload to guarantee all chunks are used.
	// Raw []byte in Config is base64-encoded by json.Marshal, giving full
	// 8-bit entropy. The base64 expansion (~33%) and gzip compression roughly
	// cancel out at a ~1.004 ratio, so ChunkSize*10 raw bytes compresses to
	// just over 10 chunks, requiring all 11.
	rel := &release.Release{
		Name:    "max-capacity-test",
		Version: 1,
		Config:  map[string]any{"payload": deterministicPayload(chunkSize * (maxChunks - 1))},
		Info:    &release.Info{Status: release.StatusDeployed},
	}

	secretsClient := clientcorev1.NewForConfigOrDie(cfg).Secrets("default")
	drv := storage.NewChunkedSecrets(secretsClient, "test-owner", chunkedSecretsConfig)

	// Key follows the Helm release Secret naming scheme.
	key := fmt.Sprintf("sh.helm.release.v1.%s.v%d", rel.Name, rel.Version)
	require.NoError(t, drv.Create(key, rel))

	// Verify round-trip
	actual, err := drv.Get(key)
	require.NoError(t, err)
	assert.Equal(t, rel.Name, actual.Name)
	assert.Equal(t, rel.Version, actual.Version)

	// Collect secrets and verify chunk sizes using the extraChunks field
	// from the index Secret to determine ordering.
	// NOTE(review): this lists every Secret in "default" — it assumes no other
	// test has left chunked-release Secrets in that namespace; confirm these
	// tests do not run in parallel against the shared environment.
	allSecrets, err := secretsClient.List(context.Background(), metav1.ListOptions{})
	require.NoError(t, err)

	secretsByName := map[string][]byte{}
	var indexChunkData []byte
	var extraChunkNames []string
	for _, s := range allSecrets.Items {
		switch s.Type {
		case storage.SecretTypeChunkedIndex:
			// The index Secret carries the first chunk plus the ordered list
			// of the remaining chunk Secret names.
			indexChunkData = s.Data["chunk"]
			require.NoError(t, json.Unmarshal(s.Data["extraChunks"], &extraChunkNames))
		case storage.SecretTypeChunkedChunk:
			secretsByName[s.Name] = s.Data["chunk"]
		}
	}

	require.NotNil(t, indexChunkData, "index Secret must exist")
	require.Len(t, extraChunkNames, maxChunks-1,
		"release must use all %d chunks", maxChunks)

	// The first 10 chunks (index + 9 extras) must be exactly ChunkSize.
	// The last chunk may be smaller since it holds the remainder.
	assert.Equalf(t, chunkSize, len(indexChunkData),
		"chunk 1/%d (index) must be exactly ChunkSize", maxChunks)
	for i, name := range extraChunkNames[:maxChunks-2] {
		chunkData, ok := secretsByName[name]
		require.True(t, ok, "chunk Secret %q not found", name)
		assert.Equalf(t, chunkSize, len(chunkData),
			"chunk %d/%d must be exactly ChunkSize", i+2, maxChunks)
	}

	// The last chunk just needs to be non-empty.
	lastName := extraChunkNames[maxChunks-2]
	lastChunk, ok := secretsByName[lastName]
	require.True(t, ok, "last chunk Secret %q not found", lastName)
	assert.NotEmpty(t, lastChunk, "last chunk must be non-empty")

	// Clean up so subsequent tests listing Secrets in "default" start fresh.
	_, err = drv.Delete(key)
	require.NoError(t, err)
}
126+
127+
// deterministicPayload returns n bytes of pseudo-random, effectively
// incompressible data. The fixed seed makes the output identical on every
// run, keeping the tests reproducible.
func deterministicPayload(n int) []byte {
	payload := make([]byte, n)
	// (*rand.Rand).Read always fills the slice and returns a nil error,
	// so the results can be safely discarded.
	rand.New(rand.NewSource(42)).Read(payload)
	return payload
}

0 commit comments

Comments
 (0)