duplicated key in values.yaml fails in kustomize #433

@aberfeldy

Description

Applying version 1.16.3 of the Helm chart leads to an error in kustomize via Argo CD:

recursed accumulation of path '<path to cached source>/apps/qdrant/base': 
could not parse merged values into map: 
received error yaml: unmarshal errors: line 85: 
mapping key "additionalLabels" already defined at line 16 for the following resource: [...]

helm show values qdrant/qdrant yields the following values, which indeed define additionalLabels twice at the top level:

replicaCount: 1

image:
  repository: docker.io/qdrant/qdrant
  pullPolicy: IfNotPresent
  tag: ""
  useUnprivilegedImage: false

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
args: ["./config/initialize.sh"]
env: []
  # - name: QDRANT_ALLOW_RECOVERY_MODE
  #   value: true
additionalLabels: {}

# checks - Readiness and liveness checks can only be enabled for either http (REST) or grpc (multiple checks not supported)
# grpc checks are only available from k8s 1.24+, so by default we check http
service:
  type: ClusterIP
  additionalLabels: {}
  annotations: {}
  loadBalancerIP: ""
  ports:
    - name: http
      port: 6333
      targetPort: 6333
      protocol: TCP
      checksEnabled: true
      # appProtocol: http
    - name: grpc
      port: 6334
      targetPort: 6334
      protocol: TCP
      checksEnabled: false
      # appProtocol: http2
    - name: p2p
      port: 6335
      targetPort: 6335
      protocol: TCP
      checksEnabled: false

ingress:
  enabled: false
  ingressClassName: ""
  additionalLabels: {}
  annotations: {}
  # kubernetes.io/ingress.class: alb
  hosts:
    - host: example-domain.com
      paths:
        - path: /
          pathType: Prefix
          servicePort: 6333
  tls: []
    # - hosts:
    #     - example-domain.com
    #   secretName: tls-secret-name

livenessProbe:
  enabled: false
  initialDelaySeconds: 5
  periodSeconds: 5
  timeoutSeconds: 1
  failureThreshold: 6
  successThreshold: 1

readinessProbe:
  enabled: true
  initialDelaySeconds: 5
  periodSeconds: 5
  timeoutSeconds: 1
  failureThreshold: 6
  successThreshold: 1

startupProbe:
  enabled: false
  initialDelaySeconds: 10
  periodSeconds: 5
  timeoutSeconds: 1
  failureThreshold: 30
  successThreshold: 1

additionalLabels: {}
# additionalAnnotations will be added to all top-level resources (StatefulSet, Service, ConfigMap, etc.)
additionalAnnotations: {}
podAnnotations: {}
podLabels: {}

resources: {}
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

containerSecurityContext:
  runAsNonRoot: true
  runAsUser: 1000
  runAsGroup: 2000
  allowPrivilegeEscalation: false
  privileged: false
  readOnlyRootFilesystem: true

podSecurityContext:
  fsGroup: 3000
  fsGroupChangePolicy: Always

lifecycle:
  preStop:
    exec:
      # Sleeping before shutdown allows Qdrant to process requests that were
      # in-flight before the node is removed from load-balancing.
      # If using an external load balancer, you may need to increase this
      # duration to be greater than the LB's health check interval.
      command: ["sleep", "3"]

# Unless .Values.image.useUnprivilegedImage is set to true, ensures that the pre-existing
# files on the storage and snapshot volume are owned by the container's user and fsGroup.
updateVolumeFsOwnership: true

nodeSelector: {}

tolerations: []

affinity: {}
  # podAntiAffinity:
  #   requiredDuringSchedulingIgnoredDuringExecution:
  #   - labelSelector:
  #       matchExpressions:
  #         - key: app.kubernetes.io/name
  #           operator: In
  #           values:
  #             - '{{ include "qdrant.name" . }}'
  #         - key: app.kubernetes.io/instance
  #           operator: In
  #           values:
  #             - '{{ .Release.Name }}'
  #     topologyKey: "kubernetes.io/hostname"

topologySpreadConstraints: []

persistence:
  accessModes: ["ReadWriteOnce"]
  size: 10Gi
  annotations: {}
  additionalLabels: {}
  # storageVolumeName: qdrant-storage
  # storageSubPath: ""
  # storageClassName: local-path
  # volumeAttributesClassName: ""

# If you use snapshots or the snapshot shard transfer mechanism, we recommend
# creating a separate volume of the same size as your main volume so that your
# cluster won't crash if the snapshot is too big.
snapshotPersistence:
  enabled: false
  accessModes: ["ReadWriteOnce"]
  size: 10Gi
  annotations: {}
  additionalLabels: {}
  # snapshotsVolumeName: qdrant-snapshots
  # snapshotsSubPath: ""
  # You can change the storageClassName to ensure snapshots are saved to cold storage.
  # storageClassName: local-path
  # volumeAttributesClassName: ""

snapshotRestoration:
  enabled: false
  # Set pvcName if you want to restore from a separately-created PVC. Only supported for single-node clusters unless the PVC is ReadWriteMany.
  # If you set snapshotPersistence.enabled and want to restore a snapshot from there, you can leave this blank to skip mounting an external volume.
  pvcName: snapshots-pvc
  # Must not conflict with /qdrant/snapshots or /qdrant/storage
  mountPath: /qdrant/snapshot-restoration
  snapshots:
  #  - /qdrant/snapshot-restoration/test_collection/test_collection-2022-10-24-13-56-50.snapshot:test_collection

# example of modifying the configuration to override the defaults
config:
  cluster:
    enabled: true
    p2p:
      port: 6335
      enable_tls: false
    consensus:
      tick_period_ms: 100
  service:
    enable_tls: false

sidecarContainers: []
# sidecarContainers:
#   - name: my-sidecar
#     image: qdrant/my-sidecar-image
#     imagePullPolicy: Always
#     ports:
#     - name: my-port
#       containerPort: 5000
#       protocol: TCP
#     resources:
#       requests:
#         memory: 10Mi
#         cpu: 10m
#       limits:
#         memory: 100Mi
#         cpu: 100m

metrics:
  serviceMonitor:
    enabled: false
    additionalLabels: {}
    scrapeInterval: 30s
    scrapeTimeout: 10s
    targetPort: http
    targetPath: "/metrics"
    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
    ##
    metricRelabelings: []
    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
    ##
    relabelings: []
    ## Authorization to apply to the metrics endpoint for the cases when the API key(s) are configured externally
    ## ref: https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.SafeAuthorization
    ##
    authorization: {}
    # authorization:
    #   type: Bearer
    #   credentials:
    #     name: external-secret-with-api-key
    #     key: api-key

serviceAccount:
  annotations: {}

priorityClassName: ""

shareProcessNamespace: false

# We discourage changing this setting. Using the "OrderedReady" policy in a
# multi-node cluster will cause a deadlock where nodes refuse to become
# "Ready" until all nodes are running.
podManagementPolicy: Parallel

podDisruptionBudget:
  enabled: false
  maxUnavailable: 1
  # do not enable this unless you are running Kubernetes 1.27 or newer
  unhealthyPodEvictionPolicy: ""
  # minAvailable: 1

# api key for authentication at qdrant
# false: no api key will be configured
# true: an api key will be auto-generated
# string: the given string will be set as an apikey
# Also supports reading in from an external secret using
# valueFrom:
#   secretKeyRef:
#     name:
#     key:
# apiKey: false

# read-only api key for authentication at qdrant
# false: no read-only api key will be configured
# true: a read-only api key will be auto-generated
# string: the given string will be set as a read-only apikey
# Also supports reading in from an external secret using
# valueFrom:
#   secretKeyRef:
#     name:
#     key:
# readOnlyApiKey: false

additionalVolumes: []
# - name: volumeName
#   emptyDir: {}

additionalVolumeMounts: []
# - name: volumeName
#   mountPath: "/mount/path"

chartTests:
  dbInteraction:
    image: registry.suse.com/bci/bci-base:latest
    resources:
      requests:
        cpu: 100m
        memory: 200Mi
      limits:
        cpu: 100m
        memory: 200Mi
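
For what it's worth, the two top-level additionalLabels: {} entries above (lines 16 and 85 of the rendered values, matching the error message) are the culprit. Helm's own value handling apparently tolerates the duplicate key (the later one wins), while kustomize unmarshals the merged values strictly and rejects it; piping helm show values qdrant/qdrant through yamllint flags it as well, since yamllint's key-duplicates rule is enabled by default. The fix should be as simple as dropping one of the two entries from the chart's values.yaml.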
