From c58acb4484d176f27efb70c236fd503d0117a570 Mon Sep 17 00:00:00 2001 From: Razvan-Daniel Mihai <84674+razvan@users.noreply.github.com> Date: Wed, 4 Feb 2026 16:34:40 +0100 Subject: [PATCH 01/11] WIP --- rust/operator-binary/src/connect/crd.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/rust/operator-binary/src/connect/crd.rs b/rust/operator-binary/src/connect/crd.rs index d9d138ba..0eaecc17 100644 --- a/rust/operator-binary/src/connect/crd.rs +++ b/rust/operator-binary/src/connect/crd.rs @@ -67,6 +67,7 @@ pub enum Error { ) )] pub mod versioned { + use stackable_operator::crd::s3; /// An Apache Spark Connect server component. This resource is managed by the Stackable operator /// for Apache Spark. Find more information on how to use it in the @@ -155,6 +156,10 @@ pub mod versioned { /// This can be shortened by the `maxCertificateLifetime` setting on the SecretClass issuing the TLS certificate. #[fragment_attrs(serde(default))] pub requested_secret_lifetime: Option, + + /// One or more S3 connections to be used by the Spark Connect server. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub s3: Vec, } #[derive(Clone, Debug, Default, JsonSchema, PartialEq, Fragment)] From 72ef48b761a8a21baf66415831b7d2d444b8ee87 Mon Sep 17 00:00:00 2001 From: Razvan-Daniel Mihai <84674+razvan@users.noreply.github.com> Date: Thu, 5 Feb 2026 15:59:02 +0100 Subject: [PATCH 02/11] add s3 props, secret volumes and mounts --- deploy/helm/spark-k8s-operator/crds/crds.yaml | 174 ++++++++++++++++++ rust/operator-binary/Cargo.toml | 1 + rust/operator-binary/src/connect/common.rs | 2 +- .../operator-binary/src/connect/controller.rs | 19 +- rust/operator-binary/src/connect/crd.rs | 11 +- rust/operator-binary/src/connect/mod.rs | 1 + rust/operator-binary/src/connect/s3.rs | 160 ++++++++++++++++ rust/operator-binary/src/connect/server.rs | 40 ++-- 8 files changed, 388 insertions(+), 20 deletions(-) create mode 100644 rust/operator-binary/src/connect/s3.rs diff --git a/deploy/helm/spark-k8s-operator/crds/crds.yaml b/deploy/helm/spark-k8s-operator/crds/crds.yaml index bf2e55a2..fad727ef 100644 --- a/deploy/helm/spark-k8s-operator/crds/crds.yaml +++ b/deploy/helm/spark-k8s-operator/crds/crds.yaml @@ -2634,6 +2634,180 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true type: array + s3: + default: [] + description: One or more S3 connections to be used by the Spark Connect server. + items: + oneOf: + - required: + - inline + - required: + - reference + properties: + inline: + description: |- + S3 bucket specification containing the bucket name and an inlined or referenced connection specification. + Learn more on the [S3 concept documentation](https://docs.stackable.tech/home/nightly/concepts/s3). + properties: + bucketName: + description: The name of the S3 bucket. + type: string + connection: + description: The definition of an S3 connection, either inline or as a reference. + oneOf: + - required: + - inline + - required: + - reference + properties: + inline: + description: |- + S3 connection definition as a resource. + Learn more on the [S3 concept documentation](https://docs.stackable.tech/home/nightly/concepts/s3). + properties: + accessStyle: + default: VirtualHosted + description: |- + Which access style to use. + Defaults to virtual hosted-style as most of the data products out there. + Have a look at the [AWS documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html). 
+ enum: + - Path + - VirtualHosted + type: string + credentials: + description: |- + If the S3 uses authentication you have to specify you S3 credentials. + In the most cases a [SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass) + providing `accessKey` and `secretKey` is sufficient. + nullable: true + properties: + scope: + description: |- + [Scope](https://docs.stackable.tech/home/nightly/secret-operator/scope) of the + [SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass). + nullable: true + properties: + listenerVolumes: + default: [] + description: |- + The listener volume scope allows Node and Service scopes to be inferred from the applicable listeners. + This must correspond to Volume names in the Pod that mount Listeners. + items: + type: string + type: array + node: + default: false + description: |- + The node scope is resolved to the name of the Kubernetes Node object that the Pod is running on. + This will typically be the DNS name of the node. + type: boolean + pod: + default: false + description: |- + The pod scope is resolved to the name of the Kubernetes Pod. + This allows the secret to differentiate between StatefulSet replicas. + type: boolean + services: + default: [] + description: |- + The service scope allows Pod objects to specify custom scopes. + This should typically correspond to Service objects that the Pod participates in. + items: + type: string + type: array + type: object + secretClass: + description: '[SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass) containing the LDAP bind credentials.' + type: string + required: + - secretClass + type: object + host: + description: 'Host of the S3 server without any protocol or port. For example: `west1.my-cloud.com`.' + type: string + port: + description: |- + Port the S3 server listens on. + If not specified the product will determine the port to use. + format: uint16 + maximum: 65535.0 + minimum: 0.0 + nullable: true + type: integer + region: + default: + name: us-east-1 + description: |- + Bucket region used for signing headers (sigv4). + + This defaults to `us-east-1` which is compatible with other implementations such as Minio. + + WARNING: Some products use the Hadoop S3 implementation which falls back to us-east-2. + properties: + name: + default: us-east-1 + type: string + type: object + tls: + description: Use a TLS connection. If not specified no TLS will be used. + nullable: true + properties: + verification: + description: The verification method used to verify the certificates of the server and/or the client. + oneOf: + - required: + - none + - required: + - server + properties: + none: + description: Use TLS but don't verify certificates. + type: object + server: + description: Use TLS and a CA certificate to verify the server. + properties: + caCert: + description: CA cert to verify the server. + oneOf: + - required: + - webPki + - required: + - secretClass + properties: + secretClass: + description: |- + Name of the [SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass) which will provide the CA certificate. + Note that a SecretClass does not need to have a key but can also work with just a CA certificate, + so if you got provided with a CA cert but don't have access to the key you can still use this method. + type: string + webPki: + description: |- + Use TLS and the CA certificates trusted by the common web browsers to verify the server. + This can be useful when you e.g. 
use public AWS S3 or other public available services. + type: object + type: object + required: + - caCert + type: object + type: object + required: + - verification + type: object + required: + - host + type: object + reference: + type: string + type: object + required: + - bucketName + - connection + type: object + reference: + type: string + type: object + type: array server: default: roleConfig: diff --git a/rust/operator-binary/Cargo.toml b/rust/operator-binary/Cargo.toml index a7d48b23..1c7e1073 100644 --- a/rust/operator-binary/Cargo.toml +++ b/rust/operator-binary/Cargo.toml @@ -25,6 +25,7 @@ tracing-futures.workspace = true clap.workspace = true futures.workspace = true tokio.workspace = true +indoc.workspace = true [dev-dependencies] indoc.workspace = true diff --git a/rust/operator-binary/src/connect/common.rs b/rust/operator-binary/src/connect/common.rs index 78ca6e87..06128e83 100644 --- a/rust/operator-binary/src/connect/common.rs +++ b/rust/operator-binary/src/connect/common.rs @@ -96,7 +96,7 @@ pub(crate) fn jvm_args( // Merges server and executor properties and renders the contents // of the Spark properties file. pub(crate) fn spark_properties( - props: &[BTreeMap>; 2], + props: &[BTreeMap>], ) -> Result { let mut result = BTreeMap::new(); for p in props { diff --git a/rust/operator-binary/src/connect/controller.rs b/rust/operator-binary/src/connect/controller.rs index 9100852b..1a65d7a4 100644 --- a/rust/operator-binary/src/connect/controller.rs +++ b/rust/operator-binary/src/connect/controller.rs @@ -21,7 +21,7 @@ use strum::{EnumDiscriminants, IntoStaticStr}; use super::crd::{CONNECT_APP_NAME, CONNECT_CONTROLLER_NAME, v1alpha1}; use crate::{ Ctx, - connect::{common, crd::SparkConnectServerStatus, executor, server, service}, + connect::{common, crd::SparkConnectServerStatus, executor, s3, server, service}, crd::constants::{OPERATOR_NAME, SPARK_IMAGE_BASE_NAME}, }; @@ -142,6 +142,12 @@ pub enum Error { ResolveProductImage { source: product_image_selection::Error, }, + + #[snafu(display("failed to resolve S3 connections for SparkConnectServer {name:?}"))] + ResolveS3Connections { source: s3::Error, name: String }, + + #[snafu(display("failed to build connect server S3 properties"))] + S3SparkProperties { source: crate::connect::s3::Error }, } type Result = std::result::Result; @@ -186,6 +192,13 @@ pub async fn reconcile( .resolve(SPARK_IMAGE_BASE_NAME, crate::built_info::PKG_VERSION) .context(ResolveProductImageSnafu)?; + // Resolve any S3 connections early to fail fast if there are issues. + let resolved_s3_buckets = s3::ResolvedS3Buckets::resolve(client, scs) + .await + .with_context(|_| ResolveS3ConnectionsSnafu { + name: scs.name_unchecked(), + })?; + // Use a dedicated service account for connect server pods. 
let (service_account, role_binding) = build_rbac_resources( scs, @@ -229,6 +242,9 @@ pub async fn reconcile( // Server config map let spark_props = common::spark_properties(&[ + resolved_s3_buckets + .spark_properties() + .context(S3SparkPropertiesSnafu)?, server::server_properties( scs, &server_config, @@ -308,6 +324,7 @@ pub async fn reconcile( &server_config_map, &applied_listener.name_any(), args, + &resolved_s3_buckets, ) .context(BuildServerStatefulSetSnafu)?; diff --git a/rust/operator-binary/src/connect/crd.rs b/rust/operator-binary/src/connect/crd.rs index 0eaecc17..cc38c758 100644 --- a/rust/operator-binary/src/connect/crd.rs +++ b/rust/operator-binary/src/connect/crd.rs @@ -15,6 +15,7 @@ use stackable_operator::{ fragment::{self, Fragment, ValidationError}, merge::Merge, }, + crd::s3, deep_merger::ObjectOverrides, k8s_openapi::{api::core::v1::PodAntiAffinity, apimachinery::pkg::api::resource::Quantity}, kube::{CustomResource, ResourceExt}, @@ -67,8 +68,6 @@ pub enum Error { ) )] pub mod versioned { - use stackable_operator::crd::s3; - /// An Apache Spark Connect server component. This resource is managed by the Stackable operator /// for Apache Spark. Find more information on how to use it in the /// [operator documentation](DOCS_BASE_URL_PLACEHOLDER/spark-k8s/usage-guide/connect-server). @@ -88,6 +87,10 @@ pub mod versioned { #[serde(default)] pub cluster_operation: ClusterOperation, + /// One or more S3 connections to be used by the Spark Connect server. + #[serde(default)] + pub s3: Vec, + // Docs are on the ObjectOverrides struct #[serde(default)] pub object_overrides: ObjectOverrides, @@ -156,10 +159,6 @@ pub mod versioned { /// This can be shortened by the `maxCertificateLifetime` setting on the SecretClass issuing the TLS certificate. #[fragment_attrs(serde(default))] pub requested_secret_lifetime: Option, - - /// One or more S3 connections to be used by the Spark Connect server. 
- #[serde(default, skip_serializing_if = "Option::is_none")] - pub s3: Vec, } #[derive(Clone, Debug, Default, JsonSchema, PartialEq, Fragment)] diff --git a/rust/operator-binary/src/connect/mod.rs b/rust/operator-binary/src/connect/mod.rs index 1692e6f5..daab765f 100644 --- a/rust/operator-binary/src/connect/mod.rs +++ b/rust/operator-binary/src/connect/mod.rs @@ -2,6 +2,7 @@ mod common; pub mod controller; pub mod crd; mod executor; +mod s3; pub mod server; mod service; diff --git a/rust/operator-binary/src/connect/s3.rs b/rust/operator-binary/src/connect/s3.rs new file mode 100644 index 00000000..95c498af --- /dev/null +++ b/rust/operator-binary/src/connect/s3.rs @@ -0,0 +1,160 @@ +use std::collections::{BTreeMap, BTreeSet}; + +use snafu::{OptionExt, ResultExt, Snafu}; +use stackable_operator::{ + commons::secret_class::{SecretClassVolume, SecretClassVolumeError}, + crd::s3, + k8s_openapi::api::core::v1::{Volume, VolumeMount}, +}; + +use crate::{ + connect::crd, + crd::constants::{ACCESS_KEY_ID, S3_SECRET_DIR_NAME, SECRET_ACCESS_KEY}, +}; + +#[derive(Snafu, Debug)] +#[allow(clippy::enum_variant_names)] +pub enum Error { + #[snafu(display("failed to resolve S3 connection"))] + ResolveS3Connection { source: s3::v1alpha1::BucketError }, + + #[snafu(display("missing namespace"))] + MissingNamespace, + + #[snafu(display("failed to get endpoint for S3 bucket {bucket_name:?}"))] + BucketEndpoint { + bucket_name: String, + source: s3::v1alpha1::ConnectionError, + }, + + #[snafu(display( + "failed to create secret volume for S3 bucket with secret class {secret_class:?}" + ))] + S3SecretVolume { + secret_class: String, + source: SecretClassVolumeError, + }, +} + +pub(crate) struct ResolvedS3Buckets { + s3_buckets: Vec, + secret_class_volumes: BTreeSet, +} + +impl ResolvedS3Buckets { + pub(crate) async fn resolve( + client: &stackable_operator::client::Client, + connect_server: &crd::v1alpha1::SparkConnectServer, + ) -> Result { + let mut s3_buckets = Vec::new(); + let mut secret_class_volumes = BTreeSet::new(); + let namespace = connect_server + .metadata + .namespace + .as_ref() + .context(MissingNamespaceSnafu)?; + for conn in connect_server.spec.s3.iter() { + let resolved_bucket = conn + .clone() + .resolve(client, namespace) + .await + .context(ResolveS3ConnectionSnafu)?; + + if let Some(credentials) = &resolved_bucket.connection.credentials { + secret_class_volumes.insert(credentials.clone()); + } + + s3_buckets.push(resolved_bucket); + } + + Ok(ResolvedS3Buckets { + s3_buckets, + secret_class_volumes, + }) + } + + // Generate Spark properties for the resolved S3 buckets. + // Properties are generated "per bucket" using the prefix: spark.hadoop.fs.s3a.bucket.{bucket_name}. + pub(crate) fn spark_properties(&self) -> Result>, Error> { + let mut result = BTreeMap::new(); + + for bucket in &self.s3_buckets { + let bucket_name = bucket.bucket_name.clone(); + result.insert( + format!("spark.hadoop.fs.s3a.bucket.{bucket_name}.endpoint"), + Some( + bucket + .connection + .endpoint() + .with_context(|_| BucketEndpointSnafu { + bucket_name: bucket_name.clone(), + })? 
+ .to_string(), + ), + ); + result.insert( + format!("spark.hadoop.fs.s3a.bucket.{bucket_name}.path.style.access"), + Some(bucket.connection.access_style.to_string()), + ); + result.insert( + format!("spark.hadoop.fs.s3a.bucket.{bucket_name}.endpoint.region"), + Some(bucket.connection.region.name.clone()), + ); + if let Some(credentials) = &bucket.connection.credentials { + let secret_class_name = credentials.secret_class.clone(); + let secret_dir = format!("{S3_SECRET_DIR_NAME}/{secret_class_name}"); + + result.insert( + format!("spark.hadoop.fs.s3a.bucket.{bucket_name}.access.key"), + Some(format!("${{file:UTF-8:{secret_dir}/{ACCESS_KEY_ID}}}")), + ); + result.insert( + format!("spark.hadoop.fs.s3a.bucket.{bucket_name}.secret.key"), + Some(format!("${{file:UTF-8:{secret_dir}/{SECRET_ACCESS_KEY}}}")), + ); + result.insert( + format!("spark.hadoop.fs.s3a.bucket.{bucket_name}.aws.credentials.provider"), + Some("org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider".to_string()), + ); + } else { + result.insert( + format!("spark.hadoop.fs.s3a.bucket.{bucket_name}.aws.credentials.provider"), + Some("org.apache.hadoop.fs.s3a.AnonymousAWSCredentialsProvider".to_string()), + ); + } + } + + Ok(result) + } + + pub(crate) fn volumes(&self) -> Result, Error> { + let mut volumes = Vec::new(); + for secret_class_volume in self.secret_class_volumes.iter() { + volumes.push( + secret_class_volume + .to_volume(&secret_class_volume.secret_class) + .with_context(|_| S3SecretVolumeSnafu { + secret_class: secret_class_volume.secret_class.clone(), + })?, + ); + } + Ok(volumes) + } + + pub(crate) fn volume_mounts(&self) -> Vec { + let mut mounts = Vec::new(); + + for secret_class_volume in self.secret_class_volumes.iter() { + let secret_class_name = secret_class_volume.secret_class.clone(); + let secret_dir = format!("{S3_SECRET_DIR_NAME}/{secret_class_name}"); + + mounts.push(VolumeMount { + name: secret_class_name, + mount_path: secret_dir, + ..VolumeMount::default() + }); + } + + mounts + } +} diff --git a/rust/operator-binary/src/connect/server.rs b/rust/operator-binary/src/connect/server.rs index 861168c0..86e422bd 100644 --- a/rust/operator-binary/src/connect/server.rs +++ b/rust/operator-binary/src/connect/server.rs @@ -1,5 +1,6 @@ use std::collections::{BTreeMap, HashMap}; +use indoc::formatdoc; use snafu::{OptionExt, ResultExt, Snafu}; use stackable_operator::{ builder::{ @@ -44,6 +45,7 @@ use crate::{ CONNECT_GRPC_PORT, CONNECT_UI_PORT, DEFAULT_SPARK_CONNECT_GROUP_NAME, SparkConnectContainer, v1alpha1, }, + s3, }, crd::{ constants::{ @@ -124,6 +126,9 @@ pub enum Error { #[snafu(display("failed build connect server jvm args for {name}"))] ServerJvmArgs { source: common::Error, name: String }, + + #[snafu(display("failed to add S3 secret volumes to stateful set"))] + S3SecretVolumes { source: s3::Error }, } // Assemble the configuration of the spark-connect server. @@ -204,6 +209,7 @@ pub(crate) fn server_config_map( .context(InvalidConfigMapSnafu { name: cm_name }) } +#[allow(clippy::too_many_arguments)] pub(crate) fn build_stateful_set( scs: &v1alpha1::SparkConnectServer, config: &v1alpha1::ServerConfig, @@ -212,6 +218,7 @@ pub(crate) fn build_stateful_set( config_map: &ConfigMap, listener_name: &str, args: Vec, + resolved_s3_buckets: &s3::ResolvedS3Buckets, ) -> Result { let server_role = SparkConnectRole::Server.to_string(); let recommended_object_labels = common::labels( @@ -284,6 +291,8 @@ pub(crate) fn build_stateful_set( .context(AddVolumeMountSnafu)? 
.add_volume_mount(LISTENER_VOLUME_NAME, LISTENER_VOLUME_DIR) .context(AddVolumeMountSnafu)? + .add_volume_mounts(resolved_s3_buckets.volume_mounts()) + .context(AddVolumeMountSnafu)? .readiness_probe(probe()) .liveness_probe(probe()); @@ -346,6 +355,14 @@ pub(crate) fn build_stateful_set( .context(BuildListenerVolumeSnafu)?, ]); + // Add any secret volumes needed for the configured S3 buckets + pb.add_volumes( + resolved_s3_buckets + .volumes() + .context(S3SecretVolumesSnafu)?, + ) + .context(AddVolumeSnafu)?; + // Merge user defined pod template if available let mut pod_template = pb.build_template(); if let Some(pod_overrides_spec) = scs @@ -396,18 +413,17 @@ pub(crate) fn build_stateful_set( #[allow(clippy::result_large_err)] pub(crate) fn command_args(user_args: &[String]) -> Vec { - let mut command = vec![ - // ---------- start containerdebug - format!( - "containerdebug --output={VOLUME_MOUNT_PATH_LOG}/containerdebug-state.json --loop &" - ), - // ---------- start spark connect server - "/stackable/spark/sbin/start-connect-server.sh".to_string(), - "--deploy-mode client".to_string(), // 'cluster' mode not supported - "--master k8s://https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT_HTTPS}" - .to_string(), - format!("--properties-file {VOLUME_MOUNT_PATH_CONFIG}/{SPARK_DEFAULTS_FILE_NAME}"), - ]; + let mut command = vec![formatdoc! { " + containerdebug --output={VOLUME_MOUNT_PATH_LOG}/containerdebug-state.json --loop & + + cp {VOLUME_MOUNT_PATH_CONFIG}/{SPARK_DEFAULTS_FILE_NAME} /tmp/spark.properties + config-utils template /tmp/spark.properties + + /stackable/spark/sbin/start-connect-server.sh \\ + --deploy-mode client \\ + --master k8s://https://${{KUBERNETES_SERVICE_HOST}}:${{KUBERNETES_SERVICE_PORT_HTTPS}} \\ + --properties-file /tmp/spark.properties + " }]; // User provided command line arguments command.extend_from_slice(user_args); From 2d550b132b8da728fd2cddf23384f09867e9a530 Mon Sep 17 00:00:00 2001 From: Razvan-Daniel Mihai <84674+razvan@users.noreply.github.com> Date: Thu, 5 Feb 2026 17:45:12 +0100 Subject: [PATCH 03/11] use higher level functions from op-rs --- .../operator-binary/src/connect/controller.rs | 1 + rust/operator-binary/src/connect/executor.rs | 23 +++++- rust/operator-binary/src/connect/s3.rs | 82 ++++++++----------- rust/operator-binary/src/connect/server.rs | 19 +++-- 4 files changed, 72 insertions(+), 53 deletions(-) diff --git a/rust/operator-binary/src/connect/controller.rs b/rust/operator-binary/src/connect/controller.rs index 1a65d7a4..4d4898e3 100644 --- a/rust/operator-binary/src/connect/controller.rs +++ b/rust/operator-binary/src/connect/controller.rs @@ -279,6 +279,7 @@ pub async fn reconcile( &executor_config, &resolved_product_image, &executor_config_map, + &resolved_s3_buckets, ) .context(ExecutorPodTemplateSnafu)?, ) diff --git a/rust/operator-binary/src/connect/executor.rs b/rust/operator-binary/src/connect/executor.rs index 2aba7e49..465c5331 100644 --- a/rust/operator-binary/src/connect/executor.rs +++ b/rust/operator-binary/src/connect/executor.rs @@ -26,7 +26,7 @@ use super::{ crd::{DEFAULT_SPARK_CONNECT_GROUP_NAME, SparkConnectContainer}, }; use crate::{ - connect::{common, crd::v1alpha1}, + connect::{common, crd::v1alpha1, s3}, crd::constants::{ JVM_SECURITY_PROPERTIES_FILE, LOG4J2_CONFIG_FILE, MAX_SPARK_LOG_FILES_SIZE, METRICS_PROPERTIES_FILE, POD_TEMPLATE_FILE, SPARK_DEFAULTS_FILE_NAME, @@ -85,6 +85,12 @@ pub enum Error { source: builder::configmap::Error, cm_name: String, }, + + #[snafu(display("failed to add S3 secret or 
tls volume mounts to exectors"))] + AddS3VolumeMount { source: s3::Error }, + + #[snafu(display("failed to add S3 secret volumes to exectors"))] + AddS3Volume { source: s3::Error }, } // The executor pod template can contain only a handful of properties. @@ -102,6 +108,7 @@ pub fn executor_pod_template( config: &v1alpha1::ExecutorConfig, resolved_product_image: &ResolvedProductImage, config_map: &ConfigMap, + resolved_s3_buckets: &s3::ResolvedS3Buckets, ) -> Result { let container_env = executor_env( scs.spec @@ -118,6 +125,13 @@ pub fn executor_pod_template( .add_volume_mount(VOLUME_MOUNT_NAME_CONFIG, VOLUME_MOUNT_PATH_CONFIG) .context(AddVolumeMountSnafu)? .add_volume_mount(VOLUME_MOUNT_NAME_LOG, VOLUME_MOUNT_PATH_LOG) + .context(AddVolumeMountSnafu)? + .add_volume_mounts( + resolved_s3_buckets + .volumes_and_mounts() + .context(AddS3VolumeMountSnafu)? + .1, + ) .context(AddVolumeMountSnafu)?; let metadata = ObjectMetaBuilder::new() @@ -148,6 +162,13 @@ pub fn executor_pod_template( .with_config_map(config_map.name_unchecked()) .build(), ) + .context(AddVolumeSnafu)? + .add_volumes( + resolved_s3_buckets + .volumes_and_mounts() + .context(AddS3VolumeSnafu)? + .0, + ) .context(AddVolumeSnafu)?; if let Some(cm_name) = config.log_config_map() { diff --git a/rust/operator-binary/src/connect/s3.rs b/rust/operator-binary/src/connect/s3.rs index 95c498af..f4bce982 100644 --- a/rust/operator-binary/src/connect/s3.rs +++ b/rust/operator-binary/src/connect/s3.rs @@ -1,16 +1,13 @@ -use std::collections::{BTreeMap, BTreeSet}; +use std::collections::BTreeMap; use snafu::{OptionExt, ResultExt, Snafu}; use stackable_operator::{ - commons::secret_class::{SecretClassVolume, SecretClassVolumeError}, + commons::secret_class::SecretClassVolumeError, crd::s3, k8s_openapi::api::core::v1::{Volume, VolumeMount}, }; -use crate::{ - connect::crd, - crd::constants::{ACCESS_KEY_ID, S3_SECRET_DIR_NAME, SECRET_ACCESS_KEY}, -}; +use crate::connect::crd; #[derive(Snafu, Debug)] #[allow(clippy::enum_variant_names)] @@ -34,11 +31,15 @@ pub enum Error { secret_class: String, source: SecretClassVolumeError, }, + + #[snafu(display("failed to get volumes and mounts for S3 connection"))] + ConnectionVolumesAndMounts { + source: s3::v1alpha1::ConnectionError, + }, } pub(crate) struct ResolvedS3Buckets { s3_buckets: Vec, - secret_class_volumes: BTreeSet, } impl ResolvedS3Buckets { @@ -47,7 +48,6 @@ impl ResolvedS3Buckets { connect_server: &crd::v1alpha1::SparkConnectServer, ) -> Result { let mut s3_buckets = Vec::new(); - let mut secret_class_volumes = BTreeSet::new(); let namespace = connect_server .metadata .namespace @@ -60,17 +60,10 @@ impl ResolvedS3Buckets { .await .context(ResolveS3ConnectionSnafu)?; - if let Some(credentials) = &resolved_bucket.connection.credentials { - secret_class_volumes.insert(credentials.clone()); - } - s3_buckets.push(resolved_bucket); } - Ok(ResolvedS3Buckets { - s3_buckets, - secret_class_volumes, - }) + Ok(ResolvedS3Buckets { s3_buckets }) } // Generate Spark properties for the resolved S3 buckets. 
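    // Illustration only, not part of this patch: for a hypothetical bucket named "mybucket"
    // whose connection references a credentials SecretClass, the per-bucket properties built
    // below would look roughly like
    //
    //   spark.hadoop.fs.s3a.bucket.mybucket.endpoint                  http://minio:9000 (or https:// with TLS)
    //   spark.hadoop.fs.s3a.bucket.mybucket.path.style.access         Path
    //   spark.hadoop.fs.s3a.bucket.mybucket.endpoint.region           us-east-1
    //   spark.hadoop.fs.s3a.bucket.mybucket.access.key                ${file:UTF-8:<credentials mount>/accessKey}
    //   spark.hadoop.fs.s3a.bucket.mybucket.secret.key                ${file:UTF-8:<credentials mount>/secretKey}
    //   spark.hadoop.fs.s3a.bucket.mybucket.aws.credentials.provider  org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider
    //
    // The ${file:UTF-8:...} placeholders are resolved at container start by the
    // `config-utils template` call added to the server command, and the path-style value is
    // corrected to a boolean in a later commit of this series. Host, port and the exact
    // credential file names are assumptions taken from the MinIO test setup, not fixed defaults.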
@@ -100,17 +93,16 @@ impl ResolvedS3Buckets { format!("spark.hadoop.fs.s3a.bucket.{bucket_name}.endpoint.region"), Some(bucket.connection.region.name.clone()), ); - if let Some(credentials) = &bucket.connection.credentials { - let secret_class_name = credentials.secret_class.clone(); - let secret_dir = format!("{S3_SECRET_DIR_NAME}/{secret_class_name}"); - + if let Some((access_key_file_path, secret_key_file_path)) = + bucket.connection.credentials_mount_paths() + { result.insert( format!("spark.hadoop.fs.s3a.bucket.{bucket_name}.access.key"), - Some(format!("${{file:UTF-8:{secret_dir}/{ACCESS_KEY_ID}}}")), + Some(format!("${{file:UTF-8:{access_key_file_path}}}")), ); result.insert( format!("spark.hadoop.fs.s3a.bucket.{bucket_name}.secret.key"), - Some(format!("${{file:UTF-8:{secret_dir}/{SECRET_ACCESS_KEY}}}")), + Some(format!("${{file:UTF-8:{secret_key_file_path}}}")), ); result.insert( format!("spark.hadoop.fs.s3a.bucket.{bucket_name}.aws.credentials.provider"), @@ -127,34 +119,30 @@ impl ResolvedS3Buckets { Ok(result) } - pub(crate) fn volumes(&self) -> Result, Error> { - let mut volumes = Vec::new(); - for secret_class_volume in self.secret_class_volumes.iter() { - volumes.push( - secret_class_volume - .to_volume(&secret_class_volume.secret_class) - .with_context(|_| S3SecretVolumeSnafu { - secret_class: secret_class_volume.secret_class.clone(), - })?, - ); - } - Ok(volumes) - } + // Ensures that there are no duplicate volumes or mounts across buckets. + pub(crate) fn volumes_and_mounts(&self) -> Result<(Vec, Vec), Error> { + let mut volumes_by_name = BTreeMap::new(); + let mut mounts_by_name = BTreeMap::new(); - pub(crate) fn volume_mounts(&self) -> Vec { - let mut mounts = Vec::new(); + for bucket in self.s3_buckets.iter() { + let (bucket_volumes, bucket_mounts) = bucket + .connection + .volumes_and_mounts() + .context(ConnectionVolumesAndMountsSnafu)?; - for secret_class_volume in self.secret_class_volumes.iter() { - let secret_class_name = secret_class_volume.secret_class.clone(); - let secret_dir = format!("{S3_SECRET_DIR_NAME}/{secret_class_name}"); - - mounts.push(VolumeMount { - name: secret_class_name, - mount_path: secret_dir, - ..VolumeMount::default() - }); + for volume in bucket_volumes.iter() { + let volume_name = volume.name.clone(); + volumes_by_name.entry(volume_name).or_insert(volume.clone()); + } + for mount in bucket_mounts.iter() { + let mount_name = mount.name.clone(); + mounts_by_name.entry(mount_name).or_insert(mount.clone()); + } } - mounts + Ok(( + volumes_by_name.into_values().collect(), + mounts_by_name.into_values().collect(), + )) } } diff --git a/rust/operator-binary/src/connect/server.rs b/rust/operator-binary/src/connect/server.rs index 86e422bd..cd308c67 100644 --- a/rust/operator-binary/src/connect/server.rs +++ b/rust/operator-binary/src/connect/server.rs @@ -127,8 +127,11 @@ pub enum Error { #[snafu(display("failed build connect server jvm args for {name}"))] ServerJvmArgs { source: common::Error, name: String }, + #[snafu(display("failed to add S3 secret or tls volume mounts to stateful set"))] + AddS3VolumeMount { source: s3::Error }, + #[snafu(display("failed to add S3 secret volumes to stateful set"))] - S3SecretVolumes { source: s3::Error }, + AddS3Volume { source: s3::Error }, } // Assemble the configuration of the spark-connect server. @@ -291,7 +294,12 @@ pub(crate) fn build_stateful_set( .context(AddVolumeMountSnafu)? .add_volume_mount(LISTENER_VOLUME_NAME, LISTENER_VOLUME_DIR) .context(AddVolumeMountSnafu)? 
- .add_volume_mounts(resolved_s3_buckets.volume_mounts()) + .add_volume_mounts( + resolved_s3_buckets + .volumes_and_mounts() + .context(AddS3VolumeMountSnafu)? + .1, + ) .context(AddVolumeMountSnafu)? .readiness_probe(probe()) .liveness_probe(probe()); @@ -358,8 +366,9 @@ pub(crate) fn build_stateful_set( // Add any secret volumes needed for the configured S3 buckets pb.add_volumes( resolved_s3_buckets - .volumes() - .context(S3SecretVolumesSnafu)?, + .volumes_and_mounts() + .context(AddS3VolumeSnafu)? + .0, ) .context(AddVolumeSnafu)?; @@ -502,7 +511,7 @@ pub(crate) fn server_properties( ), ( "spark.kubernetes.driver.pod.name".to_string(), - Some("${env:HOSTNAME}".to_string()), + Some("${{env:HOSTNAME}}".to_string()), ), ( "spark.driver.defaultJavaOptions".to_string(), From 2a49d9b2ba670acd66cc2460e3e05a1a1255761b Mon Sep 17 00:00:00 2001 From: Razvan-Daniel Mihai <84674+razvan@users.noreply.github.com> Date: Thu, 5 Feb 2026 19:12:08 +0100 Subject: [PATCH 04/11] regenrate certs for "minio" host update test --- Cargo.nix | 4 ++ rust/operator-binary/src/connect/server.rs | 2 +- .../kuttl/spark-connect/04-minio-secrets.yaml | 56 +++++++++++++++++++ .../kuttl/spark-connect/04-minio-users.yaml | 21 ------- .../10-deploy-spark-connect.yaml.j2 | 50 +++++++++-------- .../20-run-connect-client.yaml.j2 | 22 ++++---- .../kuttl/spark-connect/certs/generate.sh | 48 ++++++++++++++++ .../helm-bitnami-minio-values.yaml | 4 ++ 8 files changed, 151 insertions(+), 56 deletions(-) create mode 100644 tests/templates/kuttl/spark-connect/04-minio-secrets.yaml delete mode 100644 tests/templates/kuttl/spark-connect/04-minio-users.yaml create mode 100755 tests/templates/kuttl/spark-connect/certs/generate.sh diff --git a/Cargo.nix b/Cargo.nix index d04a8ea5..fc4f195a 100644 --- a/Cargo.nix +++ b/Cargo.nix @@ -8522,6 +8522,10 @@ rec { packageId = "futures 0.3.31"; features = [ "compat" ]; } + { + name = "indoc"; + packageId = "indoc"; + } { name = "product-config"; packageId = "product-config"; diff --git a/rust/operator-binary/src/connect/server.rs b/rust/operator-binary/src/connect/server.rs index cd308c67..9e27e241 100644 --- a/rust/operator-binary/src/connect/server.rs +++ b/rust/operator-binary/src/connect/server.rs @@ -511,7 +511,7 @@ pub(crate) fn server_properties( ), ( "spark.kubernetes.driver.pod.name".to_string(), - Some("${{env:HOSTNAME}}".to_string()), + Some("${env:HOSTNAME}".to_string()), ), ( "spark.driver.defaultJavaOptions".to_string(), diff --git a/tests/templates/kuttl/spark-connect/04-minio-secrets.yaml b/tests/templates/kuttl/spark-connect/04-minio-secrets.yaml new file mode 100644 index 00000000..219d7f48 --- /dev/null +++ b/tests/templates/kuttl/spark-connect/04-minio-secrets.yaml @@ -0,0 +1,56 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: minio-users +type: Opaque +stringData: + username1: | + username=spark + password=sparkspark + disabled=false + policies=readwrite,consoleAdmin,diagnostics + setPolicies=false +--- +apiVersion: secrets.stackable.tech/v1alpha1 +kind: SecretClass +metadata: + name: minio-tls-ca +spec: + backend: + k8sSearch: + searchNamespace: + pod: {} +--- +# Certificate authoity for the "minio" host. +# Generated with certs/generate.sh in this test folder. 
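# The Secret below carries the generated ca.crt/tls.crt/tls.key. It is picked up by the
# minio-tls-ca SecretClass above through its k8sSearch backend (matched via the
# secrets.stackable.tech/class label) and is also consumed directly by the MinIO chart
# via `tls.existingSecret` in helm-bitnami-minio-values.yaml.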
+apiVersion: v1 +kind: Secret +metadata: + name: minio-tls-ca + labels: + secrets.stackable.tech/class: minio-tls-ca +data: + ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQyVENDQXNHZ0F3SUJBZ0lVVUR4Z0VQUnlqQzU2anFoaDNKQTRBQng0a1RFd0RRWUpLb1pJaHZjTkFRRUwKQlFBd2V6RUxNQWtHQTFVRUJoTUNSRVV4R3pBWkJnTlZCQWdNRWxOamFHeGxjM2RwWnkxSWIyeHpkR1ZwYmpFTwpNQXdHQTFVRUJ3d0ZWMlZrWld3eEtEQW1CZ05WQkFvTUgxTjBZV05yWVdKc1pTQlRhV2R1YVc1bklFRjFkR2h2CmNtbDBlU0JKYm1NeEZUQVRCZ05WQkFNTURITjBZV05yWVdKc1pTNWtaVEFnRncweU5qQXlNRFV4TnpVeU5EUmEKR0E4eU1USTJNREV4TWpFM05USTBORm93ZXpFTE1Ba0dBMVVFQmhNQ1JFVXhHekFaQmdOVkJBZ01FbE5qYUd4bApjM2RwWnkxSWIyeHpkR1ZwYmpFT01Bd0dBMVVFQnd3RlYyVmtaV3d4S0RBbUJnTlZCQW9NSDFOMFlXTnJZV0pzClpTQlRhV2R1YVc1bklFRjFkR2h2Y21sMGVTQkpibU14RlRBVEJnTlZCQU1NREhOMFlXTnJZV0pzWlM1a1pUQ0MKQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFOQ1RHZlQ5L3E0YTlZYnVWcDltdkhkZwoyTFdzWFowUG9tTnJLUmhDajBxTFdaR3JTampVbzJqMkhlVmRpVUY0bWZIQkF0Y2Y1RGdoOHZlWmJ4eUc5SDE2Cnp0bnNjdUEvZ3dJc3VhUnlsTlNSSy9QVzlMREw0VXJXK1RrMGpZYTdlMzNlUmR3OFJ3VVpkU0hRMFhTMHJWMjMKQzgzYjJoVGo3Z0dkNXVVejBLeWt1cldLVWRDallNQTJaaEhUZWtwTTRYNEF3WCs0bk5TUi9JY3FFVzhzVE1pZwowVEQvYU9DSTFEdEtBV2ErU2lUYWdsQU9lNHcvMUZiQ2RmU0hudHJSSUVVOE1DVkxDSUo3UTdGSVI2N1dTUXdlCmQ2SGdFQTJmNXFiSlNhQjVTcG1VT3JVdXZmL1R2b0QrQ0E2TVA3RVBLdFVRdWZIMjQ2MzdpdkpIa1dTTUs2OEMKQXdFQUFhTlRNRkV3SFFZRFZSME9CQllFRkpJbW53aXd4eEdpN3BlVTc2dlk4ZjZVMkRZcE1COEdBMVVkSXdRWQpNQmFBRkpJbW53aXd4eEdpN3BlVTc2dlk4ZjZVMkRZcE1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJCmh2Y05BUUVMQlFBRGdnRUJBSkN2aTV4YWpIQ0pzd1d2SkZ3UmVkZ2grSHhFalI5QngwSWQvbWo4dEhNQ1J1VzAKdmdxL3hpQ3NtRmw4MFJIRjRGM1hiSHFMRWpMUVE4aGZqNXFFbkhMWFJwa2JuRG13NkpLK1NPNUkzUjRBblRrdApGY08vMUQwZ2pjOTFXb3lrUmFZT3pFU3dNbXBTbE9id2thT1lmb3dhVjQ5VEhIRG12Q3U5Um5sT0QxVXZhd2psCkRMTjFFZFVrRG1FMjJiN1RHK2wvRUhlYURGUzM4YitYSUQvZEkzS25GZ3JNbTNTTHNCRmQzKzhyN0xlUC9IT1AKUXpRWGEzRTBpQ1Vud1A4NlU1VHBaczJaR3RJamEwMnh5U0tCUUhoaUZPdE1lWlJWa0VnRCt5b3BIUUx5TXlKMQpyU1lxdm80UUcxRHdMUU1PTllhWndWeHJhQUYySHgzVldKcU5DbFU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + tls.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR5RENDQXJDZ0F3SUJBZ0lVVStiUHpFMDBBUzBFRklmYi9SMEJ2M2dDWFNNd0RRWUpLb1pJaHZjTkFRRUwKQlFBd2V6RUxNQWtHQTFVRUJoTUNSRVV4R3pBWkJnTlZCQWdNRWxOamFHeGxjM2RwWnkxSWIyeHpkR1ZwYmpFTwpNQXdHQTFVRUJ3d0ZWMlZrWld3eEtEQW1CZ05WQkFvTUgxTjBZV05yWVdKc1pTQlRhV2R1YVc1bklFRjFkR2h2CmNtbDBlU0JKYm1NeEZUQVRCZ05WQkFNTURITjBZV05yWVdKc1pTNWtaVEFnRncweU5qQXlNRFV4TnpVeU5EUmEKR0E4eU1USTJNREV4TWpFM05USTBORm93WGpFTE1Ba0dBMVVFQmhNQ1JFVXhHekFaQmdOVkJBZ01FbE5qYUd4bApjM2RwWnkxSWIyeHpkR1ZwYmpFT01Bd0dBMVVFQnd3RlYyVmtaV3d4RWpBUUJnTlZCQW9NQ1ZOMFlXTnJZV0pzClpURU9NQXdHQTFVRUF3d0ZiV2x1YVc4d2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUIKQVFETm4rbDdwQVNGekZmTUlWRE1qUVB5eTZnY2g1K1lkYW1ZMjJ1QjI5OUxZVXk3aGdJL25kRGNWZFMweWJ0SgpHTi9UYU0zcmpLVWVLYWUxR2pIeHFnZTczMWtsbXlJeWF6MWtCYm9qMHVEcEtMS2kzcVlpNmR0cFkzYTV5c295CmJoYlQvRkd5MGJwNXZRZmZ0c1NyNDZBNGZyQ1I0UmhiTWVXYnBqYmgrUkJGOWNDekxoa2ZBUzFkSVRTeFlPNjYKQzB4ZXFWWGwxc2x0bkZjRS9GRWVaNXVHQ1kvTDdEY1duV1pGZ2p1RG1HUitTSkpLdnFFT2tucHlMUzh5QTY2WQpaZDJxQnFaSUxTSUs4UFRtK0E4TlVEZ1UxNHhWa3BkWC9tRjBncFpMcnhOYWtEUkVoUEs2MkljQ09SOFJBcDJNCnA1TVpyZEtaSS9KUGlHVTkzUWZueUI3bEFnTUJBQUdqWHpCZE1Cc0dBMVVkRVFRVU1CS0NCVzFwYm1sdmdnbHMKYjJOaGJHaHZjM1F3SFFZRFZSME9CQllFRktUeG9NVzdTQW5pQXh6cTFqQ1pnNjJmZmtaY01COEdBMVVkSXdRWQpNQmFBRkpJbW53aXd4eEdpN3BlVTc2dlk4ZjZVMkRZcE1BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRREtrTXZJCk5XU1ZQa2llZTU4WHdvYTU1TmRmVXgwNUUvWDYzZXhyMGluYVBnZW4xMlppeXhMcFVRZ3hXdWFVU3pkQWd2ZHgKRW9zc2J0L3J2b0pDaUlmRDkyd1BsS2JIVlk5OUR4WGNqdTlOMmZZZWdSVHFrVTRDVUdjd25RcDMzMDNpNEY4ago4OW5JUW9KVkZwTEdVQUJaOC9VM1Z3U3ZEUCs2bDlrbVpQREVtbkdLR3RoTWFRMW9CZGUxV1BWR2hwMHJCdm9MCkpFejRFTnRCNWpEK05ldndibWNCT1UvbUtiLzJ3U1dCS0NTTTQ3Wk44QzR3Q0tLRTZ0VVk0VERuYWZid2RhMmMKWWU0UnZJYTE3cytiVmF0VUMxdUpRL1FMdEdQS3lRamYvWmh4QnVndElIN3pIVDZUYllCSDMvdW1kUGxmYS8zSApZQTdFL041MkN0K3VSRjU0Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + tls.key: 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRRE5uK2w3cEFTRnpGZk0KSVZETWpRUHl5NmdjaDUrWWRhbVkyMnVCMjk5TFlVeTdoZ0kvbmREY1ZkUzB5YnRKR04vVGFNM3JqS1VlS2FlMQpHakh4cWdlNzMxa2xteUl5YXoxa0Jib2owdURwS0xLaTNxWWk2ZHRwWTNhNXlzb3liaGJUL0ZHeTBicDV2UWZmCnRzU3I0NkE0ZnJDUjRSaGJNZVdicGpiaCtSQkY5Y0N6TGhrZkFTMWRJVFN4WU82NkMweGVxVlhsMXNsdG5GY0UKL0ZFZVo1dUdDWS9MN0RjV25XWkZnanVEbUdSK1NKSkt2cUVPa25weUxTOHlBNjZZWmQycUJxWklMU0lLOFBUbQorQThOVURnVTE0eFZrcGRYL21GMGdwWkxyeE5ha0RSRWhQSzYySWNDT1I4UkFwMk1wNU1acmRLWkkvSlBpR1U5CjNRZm55QjdsQWdNQkFBRUNnZ0VBR21vM2tVV1J1eXAwQU9vcXVneEhmSkpEQjE4NDFsb1BMbTdKa2NZUUdsdm0KZ3BTRmgyeWJueUo3ajduMmtENWN5b2pGSTBSUEZkL2VCbnJWL2FpTkU4cHVabEZXaEVtWWVsZnVBSm9mZ0hSVQo5bTFKeEdSc1prNTd1d1JkRXp0blBWWkZuSVlxd1diU015QUVoZHhaQWNqc24rRGR3eUZXMExiNmgrNzU4ektTCmpsSG13cWpHQUl2YVJlSnViU1JvdHRwZ2JobjdHSHZDaVZOR3JUdWV1VStNcFV1eHhJbXVYRHZpZ1krY1ZxdzIKcktPYkwyVHRSTDJ1L01adEJGQ0pYbHBKK2JXa0FzRXdXNHVBWnhKeTJSMkx1cDVEdzgxVTI0Rmw4b2U5clpGTQoxMlBZWFJFVmhISWI1djNhVzJLL0ptNkswMGFLUHhKbEtncmJFdkRUSVFLQmdRRDl3ODFNdUNUWFl3dWRvQjUwCm5PeHREVTBqTENoVjE0VmxhR25MSmh0Ukp0TWZpaXdQa0R1TmF2S0RJMnpvbGtnWCszYXZ0MmI3NlNiY0hvTzkKMzBPLytjSE9KUWZ0K0Y4L0xlYWZOaEtIYTVudUQxeWtHbEZPQVJpV3p3TWF0Zm1IZHJtOTJXS2JNVHpUaS9tVwpqWENuKzVrY3lNR1QzUGl6dWxTVDZRQkpYUUtCZ1FEUGI0L1dGYS9qK0VPcHIwdkZzR3RLWDlUUTFFNU1HblVkCmJSVHBCWk80QmUyZE5DWHRFeWVtZ0cwbnB6cDZXTXZzVjRzV2NBSmt5aytWa0RBZ2o5cUVJbTNEUDFnOFRRbGMKRHo4N1pxV1lPZzhwRkVCbURxbk5aYW1CVy9aK1ZiNHBiRUdlMm14RHBiSmRMdTNOVmxRZnAvVWI3WGtaQ2dnUgp0YVRSMXJUcktRS0JnREVkRUVMazhOeHU0dlNpNU1JVkRQMGVNZXU0eENXNURLeFB4UW40V2hrZXRvWElMRGJtCjUxKzdieXhLVXUzQkNEcjhCRUNGOG55VzUxcDYzV3lHSllxbVFBZ3h5cE1ZR3ZjVFh5czVQK1ROd29EOG9DVnkKb29IQ1hJdnpqTnBDbGdUTnlhMGd3YURmcXJJV3lUdUdMR09Xb2srYjJ6dE83U043MEpxLzRicFJBb0dBZnBBQQo0SXdtM3g3d21hMWN1K2RoN3VUOWdkU25XUU9qaFNxeTRXSUh1UFhVL2w5ODdHTU5oQ2REY2pnMEU4WHQxZXVyCjd5cTBLeTdNMCtJL212NXFRc2lHMCtQb1FCSjRyWFNZRGZRWkFRSWJrZUxMVC9tT1hNVzBZRHJ0OERMOGJXV2gKdS94a3BmbUpGQlczL2RxNFJRQkRLcUQvaStsMDl1a3ZBT0RSVGRFQ2dZRUFySnV3aUgxaUJobUFwUFJaTE1BLwpDNW5memxlbFJDQmwybngrZHA1c1hsZFdseHBsREsvVTByZjJaTVBZNkdQQmlBRnBZSXVHNThrREtRUTlRNjgrCmVDYmNMMno4b0llU3ZQWndZUTRPVkFyTTFTR1dKWE5kZm1IbndJTjZlRVI1cG9UNVYyRXA1NjU3bVNRakJkQ1YKbDMwMVU0blBFY1BsekdBdjVJVFJzT1E9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K +--- +apiVersion: secrets.stackable.tech/v1alpha1 +kind: SecretClass +metadata: + name: minio-credentials-class +spec: + backend: + k8sSearch: + searchNamespace: + pod: {} +--- +apiVersion: v1 +kind: Secret +metadata: + name: minio-credentials + labels: + secrets.stackable.tech/class: minio-credentials-class +stringData: + accessKey: spark + secretKey: sparkspark diff --git a/tests/templates/kuttl/spark-connect/04-minio-users.yaml b/tests/templates/kuttl/spark-connect/04-minio-users.yaml deleted file mode 100644 index 400fef72..00000000 --- a/tests/templates/kuttl/spark-connect/04-minio-users.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- -apiVersion: v1 -kind: Secret -metadata: - name: minio-users -type: Opaque -stringData: - username1: | - username=spark - password=sparkspark - disabled=false - policies=readwrite,consoleAdmin,diagnostics - setPolicies=false ---- -apiVersion: v1 -kind: Secret -metadata: - name: s3-credentials -stringData: - accessKey: spark - secretKey: sparkspark diff --git a/tests/templates/kuttl/spark-connect/10-deploy-spark-connect.yaml.j2 b/tests/templates/kuttl/spark-connect/10-deploy-spark-connect.yaml.j2 index da974901..0c6d4ee6 100644 --- a/tests/templates/kuttl/spark-connect/10-deploy-spark-connect.yaml.j2 +++ 
b/tests/templates/kuttl/spark-connect/10-deploy-spark-connect.yaml.j2 @@ -1,4 +1,29 @@ --- +apiVersion: s3.stackable.tech/v1alpha1 +kind: S3Connection +metadata: + name: mybucket-s3-connection +spec: + host: minio + port: 9000 + accessStyle: Path + credentials: + secretClass: minio-credentials-class + tls: + verification: + server: + caCert: + secretClass: minio-tls-ca +--- +apiVersion: s3.stackable.tech/v1alpha1 +kind: S3Bucket +metadata: + name: mybucket-s3-bucket +spec: + bucketName: mybucket + connection: + reference: mybucket-s3-connection +--- apiVersion: v1 kind: ConfigMap metadata: @@ -35,30 +60,9 @@ spec: {% if lookup('env', 'VECTOR_AGGREGATOR') %} vectorAggregatorConfigMapName: vector-aggregator-discovery {% endif %} - args: - # These are unfortunately required to make the S3A connector work with MinIO - # I had expected the clients to be able to set these, but that is not the case. - - --conf spark.hadoop.fs.s3a.aws.credentials.provider=org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider - - --conf spark.hadoop.fs.s3a.path.style.access=true - - --conf spark.hadoop.fs.s3a.endpoint=http://minio:9000 - - --conf spark.hadoop.fs.s3a.region=us-east-1 + s3: + - reference: mybucket-s3-bucket server: - podOverrides: - spec: - containers: - - name: spark - env: - - name: AWS_ACCESS_KEY_ID - valueFrom: - secretKeyRef: - name: s3-credentials - key: accessKey - - name: AWS_SECRET_ACCESS_KEY - valueFrom: - secretKeyRef: - name: s3-credentials - key: secretKey - jvmArgumentOverrides: add: - -Dmy.custom.jvm.arg=customValue diff --git a/tests/templates/kuttl/spark-connect/20-run-connect-client.yaml.j2 b/tests/templates/kuttl/spark-connect/20-run-connect-client.yaml.j2 index 67842a27..63a2dd3b 100644 --- a/tests/templates/kuttl/spark-connect/20-run-connect-client.yaml.j2 +++ b/tests/templates/kuttl/spark-connect/20-run-connect-client.yaml.j2 @@ -76,17 +76,17 @@ spec: requests: cpu: 200m memory: 128Mi - env: - - name: AWS_ACCESS_KEY_ID - valueFrom: - secretKeyRef: - name: s3-credentials - key: accessKey - - name: AWS_SECRET_ACCESS_KEY - valueFrom: - secretKeyRef: - name: s3-credentials - key: secretKey + # env: + # - name: AWS_ACCESS_KEY_ID + # valueFrom: + # secretKeyRef: + # name: minio-credentials + # key: accessKey + # - name: AWS_SECRET_ACCESS_KEY + # valueFrom: + # secretKeyRef: + # name: minio-credentials + # key: secretKey volumeMounts: - name: spark-connect-client mountPath: /app diff --git a/tests/templates/kuttl/spark-connect/certs/generate.sh b/tests/templates/kuttl/spark-connect/certs/generate.sh new file mode 100755 index 00000000..dfd63c18 --- /dev/null +++ b/tests/templates/kuttl/spark-connect/certs/generate.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +echo "Creating client cert" +FQDN="minio" + +echo "Creating Root Certificate Authority" +openssl genrsa \ + -out root-ca.key.pem \ + 2048 + +echo "Self-signing the Root Certificate Authority" +openssl req \ + -x509 \ + -new \ + -nodes \ + -key root-ca.key.pem \ + -days 36500 \ + -out root-ca.crt.pem \ + -subj "/C=DE/ST=Schleswig-Holstein/L=Wedel/O=Stackable Signing Authority Inc/CN=stackable.de" + +openssl genrsa \ + -out client.key.pem \ + 2048 + +echo "Creating the CSR" +openssl req -new \ + -key client.key.pem \ + -out client.csr.pem \ + -subj "/C=DE/ST=Schleswig-Holstein/L=Wedel/O=Stackable/CN=${FQDN}" \ + -addext "subjectAltName = DNS:${FQDN}, DNS:localhost" + +echo "Signing the client cert with the root ca" +openssl x509 \ + -req -in client.csr.pem \ + -CA root-ca.crt.pem \ + -CAkey root-ca.key.pem \ + -CAcreateserial \ + -out 
client.crt.pem \ + -days 36500 \ + -copy_extensions copy + +echo "Copying the files to match the api of the secret-operator" +cp root-ca.crt.pem ca.crt +cp client.key.pem tls.key +cp client.crt.pem tls.crt + +echo "To create a k8s secret run" +echo "kubectl create secret generic minio-tls-ca --from-file=ca.crt --from-file=tls.crt --from-file=tls.key" diff --git a/tests/templates/kuttl/spark-connect/helm-bitnami-minio-values.yaml b/tests/templates/kuttl/spark-connect/helm-bitnami-minio-values.yaml index b9c72811..8557a57b 100644 --- a/tests/templates/kuttl/spark-connect/helm-bitnami-minio-values.yaml +++ b/tests/templates/kuttl/spark-connect/helm-bitnami-minio-values.yaml @@ -67,3 +67,7 @@ resources: service: type: NodePort + +tls: + enabled: true + existingSecret: minio-tls-ca From 8c034a8bfda512857b81d3eb7d16375b920442c6 Mon Sep 17 00:00:00 2001 From: Razvan-Daniel Mihai <84674+razvan@users.noreply.github.com> Date: Thu, 5 Feb 2026 21:10:18 +0100 Subject: [PATCH 05/11] fix boolean path style property --- rust/operator-binary/src/connect/s3.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rust/operator-binary/src/connect/s3.rs b/rust/operator-binary/src/connect/s3.rs index f4bce982..037defee 100644 --- a/rust/operator-binary/src/connect/s3.rs +++ b/rust/operator-binary/src/connect/s3.rs @@ -3,7 +3,7 @@ use std::collections::BTreeMap; use snafu::{OptionExt, ResultExt, Snafu}; use stackable_operator::{ commons::secret_class::SecretClassVolumeError, - crd::s3, + crd::s3::{self, v1alpha1::S3AccessStyle}, k8s_openapi::api::core::v1::{Volume, VolumeMount}, }; @@ -87,7 +87,7 @@ impl ResolvedS3Buckets { ); result.insert( format!("spark.hadoop.fs.s3a.bucket.{bucket_name}.path.style.access"), - Some(bucket.connection.access_style.to_string()), + Some((bucket.connection.access_style == S3AccessStyle::Path).to_string()), ); result.insert( format!("spark.hadoop.fs.s3a.bucket.{bucket_name}.endpoint.region"), From b549812b2eb282975ac720f0629a50728a545f7a Mon Sep 17 00:00:00 2001 From: Razvan-Daniel Mihai <84674+razvan@users.noreply.github.com> Date: Fri, 6 Feb 2026 10:55:27 +0100 Subject: [PATCH 06/11] add connectors field and successfully ran non tls tests --- deploy/helm/spark-k8s-operator/crds/crds.yaml | 353 +++++++++--------- rust/operator-binary/src/connect/crd.rs | 50 ++- rust/operator-binary/src/connect/s3.rs | 2 +- .../10-deploy-spark-connect.yaml.j2 | 7 +- ...yaml => helm-bitnami-minio-values.yaml.j2} | 3 +- tests/test-definition.yaml | 4 +- 6 files changed, 239 insertions(+), 180 deletions(-) rename tests/templates/kuttl/spark-connect/{helm-bitnami-minio-values.yaml => helm-bitnami-minio-values.yaml.j2} (95%) diff --git a/deploy/helm/spark-k8s-operator/crds/crds.yaml b/deploy/helm/spark-k8s-operator/crds/crds.yaml index fad727ef..dda16af9 100644 --- a/deploy/helm/spark-k8s-operator/crds/crds.yaml +++ b/deploy/helm/spark-k8s-operator/crds/crds.yaml @@ -2303,6 +2303,185 @@ spec: and `stopped` will take no effect until `reconciliationPaused` is set to false or removed. type: boolean type: object + connectors: + default: + s3: [] + description: One or more S3 connections to be used by the Spark Connect server. + properties: + s3: + default: [] + items: + oneOf: + - required: + - inline + - required: + - reference + properties: + inline: + description: |- + S3 bucket specification containing the bucket name and an inlined or referenced connection specification. + Learn more on the [S3 concept documentation](https://docs.stackable.tech/home/nightly/concepts/s3). 
+ properties: + bucketName: + description: The name of the S3 bucket. + type: string + connection: + description: The definition of an S3 connection, either inline or as a reference. + oneOf: + - required: + - inline + - required: + - reference + properties: + inline: + description: |- + S3 connection definition as a resource. + Learn more on the [S3 concept documentation](https://docs.stackable.tech/home/nightly/concepts/s3). + properties: + accessStyle: + default: VirtualHosted + description: |- + Which access style to use. + Defaults to virtual hosted-style as most of the data products out there. + Have a look at the [AWS documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html). + enum: + - Path + - VirtualHosted + type: string + credentials: + description: |- + If the S3 uses authentication you have to specify you S3 credentials. + In the most cases a [SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass) + providing `accessKey` and `secretKey` is sufficient. + nullable: true + properties: + scope: + description: |- + [Scope](https://docs.stackable.tech/home/nightly/secret-operator/scope) of the + [SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass). + nullable: true + properties: + listenerVolumes: + default: [] + description: |- + The listener volume scope allows Node and Service scopes to be inferred from the applicable listeners. + This must correspond to Volume names in the Pod that mount Listeners. + items: + type: string + type: array + node: + default: false + description: |- + The node scope is resolved to the name of the Kubernetes Node object that the Pod is running on. + This will typically be the DNS name of the node. + type: boolean + pod: + default: false + description: |- + The pod scope is resolved to the name of the Kubernetes Pod. + This allows the secret to differentiate between StatefulSet replicas. + type: boolean + services: + default: [] + description: |- + The service scope allows Pod objects to specify custom scopes. + This should typically correspond to Service objects that the Pod participates in. + items: + type: string + type: array + type: object + secretClass: + description: '[SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass) containing the LDAP bind credentials.' + type: string + required: + - secretClass + type: object + host: + description: 'Host of the S3 server without any protocol or port. For example: `west1.my-cloud.com`.' + type: string + port: + description: |- + Port the S3 server listens on. + If not specified the product will determine the port to use. + format: uint16 + maximum: 65535.0 + minimum: 0.0 + nullable: true + type: integer + region: + default: + name: us-east-1 + description: |- + Bucket region used for signing headers (sigv4). + + This defaults to `us-east-1` which is compatible with other implementations such as Minio. + + WARNING: Some products use the Hadoop S3 implementation which falls back to us-east-2. + properties: + name: + default: us-east-1 + type: string + type: object + tls: + description: Use a TLS connection. If not specified no TLS will be used. + nullable: true + properties: + verification: + description: The verification method used to verify the certificates of the server and/or the client. + oneOf: + - required: + - none + - required: + - server + properties: + none: + description: Use TLS but don't verify certificates. 
+ type: object + server: + description: Use TLS and a CA certificate to verify the server. + properties: + caCert: + description: CA cert to verify the server. + oneOf: + - required: + - webPki + - required: + - secretClass + properties: + secretClass: + description: |- + Name of the [SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass) which will provide the CA certificate. + Note that a SecretClass does not need to have a key but can also work with just a CA certificate, + so if you got provided with a CA cert but don't have access to the key you can still use this method. + type: string + webPki: + description: |- + Use TLS and the CA certificates trusted by the common web browsers to verify the server. + This can be useful when you e.g. use public AWS S3 or other public available services. + type: object + type: object + required: + - caCert + type: object + type: object + required: + - verification + type: object + required: + - host + type: object + reference: + type: string + type: object + required: + - bucketName + - connection + type: object + reference: + type: string + type: object + type: array + type: object executor: description: Spark Connect executor properties. nullable: true @@ -2634,180 +2813,6 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true type: array - s3: - default: [] - description: One or more S3 connections to be used by the Spark Connect server. - items: - oneOf: - - required: - - inline - - required: - - reference - properties: - inline: - description: |- - S3 bucket specification containing the bucket name and an inlined or referenced connection specification. - Learn more on the [S3 concept documentation](https://docs.stackable.tech/home/nightly/concepts/s3). - properties: - bucketName: - description: The name of the S3 bucket. - type: string - connection: - description: The definition of an S3 connection, either inline or as a reference. - oneOf: - - required: - - inline - - required: - - reference - properties: - inline: - description: |- - S3 connection definition as a resource. - Learn more on the [S3 concept documentation](https://docs.stackable.tech/home/nightly/concepts/s3). - properties: - accessStyle: - default: VirtualHosted - description: |- - Which access style to use. - Defaults to virtual hosted-style as most of the data products out there. - Have a look at the [AWS documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html). - enum: - - Path - - VirtualHosted - type: string - credentials: - description: |- - If the S3 uses authentication you have to specify you S3 credentials. - In the most cases a [SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass) - providing `accessKey` and `secretKey` is sufficient. - nullable: true - properties: - scope: - description: |- - [Scope](https://docs.stackable.tech/home/nightly/secret-operator/scope) of the - [SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass). - nullable: true - properties: - listenerVolumes: - default: [] - description: |- - The listener volume scope allows Node and Service scopes to be inferred from the applicable listeners. - This must correspond to Volume names in the Pod that mount Listeners. - items: - type: string - type: array - node: - default: false - description: |- - The node scope is resolved to the name of the Kubernetes Node object that the Pod is running on. - This will typically be the DNS name of the node. 
- type: boolean - pod: - default: false - description: |- - The pod scope is resolved to the name of the Kubernetes Pod. - This allows the secret to differentiate between StatefulSet replicas. - type: boolean - services: - default: [] - description: |- - The service scope allows Pod objects to specify custom scopes. - This should typically correspond to Service objects that the Pod participates in. - items: - type: string - type: array - type: object - secretClass: - description: '[SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass) containing the LDAP bind credentials.' - type: string - required: - - secretClass - type: object - host: - description: 'Host of the S3 server without any protocol or port. For example: `west1.my-cloud.com`.' - type: string - port: - description: |- - Port the S3 server listens on. - If not specified the product will determine the port to use. - format: uint16 - maximum: 65535.0 - minimum: 0.0 - nullable: true - type: integer - region: - default: - name: us-east-1 - description: |- - Bucket region used for signing headers (sigv4). - - This defaults to `us-east-1` which is compatible with other implementations such as Minio. - - WARNING: Some products use the Hadoop S3 implementation which falls back to us-east-2. - properties: - name: - default: us-east-1 - type: string - type: object - tls: - description: Use a TLS connection. If not specified no TLS will be used. - nullable: true - properties: - verification: - description: The verification method used to verify the certificates of the server and/or the client. - oneOf: - - required: - - none - - required: - - server - properties: - none: - description: Use TLS but don't verify certificates. - type: object - server: - description: Use TLS and a CA certificate to verify the server. - properties: - caCert: - description: CA cert to verify the server. - oneOf: - - required: - - webPki - - required: - - secretClass - properties: - secretClass: - description: |- - Name of the [SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass) which will provide the CA certificate. - Note that a SecretClass does not need to have a key but can also work with just a CA certificate, - so if you got provided with a CA cert but don't have access to the key you can still use this method. - type: string - webPki: - description: |- - Use TLS and the CA certificates trusted by the common web browsers to verify the server. - This can be useful when you e.g. use public AWS S3 or other public available services. - type: object - type: object - required: - - caCert - type: object - type: object - required: - - verification - type: object - required: - - host - type: object - reference: - type: string - type: object - required: - - bucketName - - connection - type: object - reference: - type: string - type: object - type: array server: default: roleConfig: diff --git a/rust/operator-binary/src/connect/crd.rs b/rust/operator-binary/src/connect/crd.rs index cc38c758..c8b35d06 100644 --- a/rust/operator-binary/src/connect/crd.rs +++ b/rust/operator-binary/src/connect/crd.rs @@ -89,7 +89,7 @@ pub mod versioned { /// One or more S3 connections to be used by the Spark Connect server. 
#[serde(default)] - pub s3: Vec, + connectors: Connectors, // Docs are on the ObjectOverrides struct #[serde(default)] @@ -188,6 +188,12 @@ pub mod versioned { #[fragment_attrs(serde(default))] pub requested_secret_lifetime: Option, } + + #[derive(Clone, Debug, Default, JsonSchema, PartialEq, Deserialize, Serialize)] + struct Connectors { + #[serde(default)] + pub s3: Vec, + } } #[allow(clippy::derive_partial_eq_without_eq)] @@ -395,3 +401,45 @@ impl v1alpha1::ExecutorConfig { } } } +#[cfg(test)] +mod tests { + use indoc::indoc; + + use super::*; + + #[test] + fn test_cr_minimal_deserialization() { + let _spark_connect_cr = serde_yaml::from_str::(indoc! { r#" + apiVersion: spark.stackable.tech/v1alpha1 + kind: SparkConnectServer + metadata: + name: spark-connect + spec: + image: + productVersion: 4.1.1 + "# }) + .expect("Failed to deserialize minimal SparkConnectServer CR"); + } + + #[test] + fn test_cr_s3_deserialization() { + let input = indoc! { r#" + --- + apiVersion: spark.stackable.tech/v1alpha1 + kind: SparkConnectServer + metadata: + name: spark-connect + spec: + image: + productVersion: 4.1.1 + connectors: + s3: + - reference: my-s3-bucket + "# }; + + let deserializer = serde_yaml::Deserializer::from_str(input); + let _spark_connect_cr: v1alpha1::SparkConnectServer = + serde_yaml::with::singleton_map_recursive::deserialize(deserializer) + .expect("Failed to deserialize SparkConnectServer with S3 connectors CR"); + } +} diff --git a/rust/operator-binary/src/connect/s3.rs b/rust/operator-binary/src/connect/s3.rs index 037defee..5e5b2313 100644 --- a/rust/operator-binary/src/connect/s3.rs +++ b/rust/operator-binary/src/connect/s3.rs @@ -53,7 +53,7 @@ impl ResolvedS3Buckets { .namespace .as_ref() .context(MissingNamespaceSnafu)?; - for conn in connect_server.spec.s3.iter() { + for conn in connect_server.spec.connectors.s3.iter() { let resolved_bucket = conn .clone() .resolve(client, namespace) diff --git a/tests/templates/kuttl/spark-connect/10-deploy-spark-connect.yaml.j2 b/tests/templates/kuttl/spark-connect/10-deploy-spark-connect.yaml.j2 index 0c6d4ee6..384571b1 100644 --- a/tests/templates/kuttl/spark-connect/10-deploy-spark-connect.yaml.j2 +++ b/tests/templates/kuttl/spark-connect/10-deploy-spark-connect.yaml.j2 @@ -9,11 +9,13 @@ spec: accessStyle: Path credentials: secretClass: minio-credentials-class +{% if test_scenario['values']['s3-use-tls'] == 'true' %} tls: verification: server: caCert: secretClass: minio-tls-ca +{% endif %} --- apiVersion: s3.stackable.tech/v1alpha1 kind: S3Bucket @@ -60,8 +62,9 @@ spec: {% if lookup('env', 'VECTOR_AGGREGATOR') %} vectorAggregatorConfigMapName: vector-aggregator-discovery {% endif %} - s3: - - reference: mybucket-s3-bucket + connectors: + s3: + - reference: mybucket-s3-bucket server: jvmArgumentOverrides: add: diff --git a/tests/templates/kuttl/spark-connect/helm-bitnami-minio-values.yaml b/tests/templates/kuttl/spark-connect/helm-bitnami-minio-values.yaml.j2 similarity index 95% rename from tests/templates/kuttl/spark-connect/helm-bitnami-minio-values.yaml rename to tests/templates/kuttl/spark-connect/helm-bitnami-minio-values.yaml.j2 index 8557a57b..cf794a80 100644 --- a/tests/templates/kuttl/spark-connect/helm-bitnami-minio-values.yaml +++ b/tests/templates/kuttl/spark-connect/helm-bitnami-minio-values.yaml.j2 @@ -67,7 +67,8 @@ resources: service: type: NodePort - +{% if test_scenario['values']['s3-use-tls'] == 'true' %} tls: enabled: true existingSecret: minio-tls-ca +{% endif %} diff --git a/tests/test-definition.yaml 
b/tests/test-definition.yaml index ebb0b697..fb3e3e8e 100644 --- a/tests/test-definition.yaml +++ b/tests/test-definition.yaml @@ -62,7 +62,8 @@ dimensions: - name: s3-use-tls values: - "false" - - "true" + # TODO: re-enable when the spark-connect implementation works. + # - "true" tests: - name: smoke dimensions: @@ -134,6 +135,7 @@ tests: dimensions: - spark-connect - openshift + - s3-use-tls suites: - name: nightly From 6682beb9260fc37ab6bb1e855fc24ce76efedd97 Mon Sep 17 00:00:00 2001 From: Razvan-Daniel Mihai <84674+razvan@users.noreply.github.com> Date: Fri, 6 Feb 2026 17:16:45 +0100 Subject: [PATCH 07/11] add truststore init container and re-enable tls tests --- rust/operator-binary/src/connect/crd.rs | 37 ++++- rust/operator-binary/src/connect/executor.rs | 17 ++- rust/operator-binary/src/connect/s3.rs | 146 ++++++++++++++++++- rust/operator-binary/src/connect/server.rs | 11 +- tests/test-definition.yaml | 3 +- 5 files changed, 206 insertions(+), 8 deletions(-) diff --git a/rust/operator-binary/src/connect/crd.rs b/rust/operator-binary/src/connect/crd.rs index c8b35d06..46490781 100644 --- a/rust/operator-binary/src/connect/crd.rs +++ b/rust/operator-binary/src/connect/crd.rs @@ -422,7 +422,7 @@ mod tests { } #[test] - fn test_cr_s3_deserialization() { + fn test_cr_s3_ref_deserialization() { let input = indoc! { r#" --- apiVersion: spark.stackable.tech/v1alpha1 @@ -442,4 +442,39 @@ mod tests { serde_yaml::with::singleton_map_recursive::deserialize(deserializer) .expect("Failed to deserialize SparkConnectServer with S3 connectors CR"); } + + #[test] + fn test_cr_s3_inline_deserialization() { + let input = indoc! { r#" + --- + apiVersion: spark.stackable.tech/v1alpha1 + kind: SparkConnectServer + metadata: + name: spark-connect + spec: + image: + productVersion: 4.1.1 + connectors: + s3: + - inline: + bucketName: mybucket + connection: + inline: + host: minio + port: 9000 + accessStyle: Path + credentials: + secretClass: minio-credentials-class + tls: + verification: + server: + caCert: + secretClass: minio-tls-ca + "# }; + + let deserializer = serde_yaml::Deserializer::from_str(input); + let _spark_connect_cr: v1alpha1::SparkConnectServer = + serde_yaml::with::singleton_map_recursive::deserialize(deserializer) + .expect("Failed to deserialize SparkConnectServer with S3 connectors CR"); + } } diff --git a/rust/operator-binary/src/connect/executor.rs b/rust/operator-binary/src/connect/executor.rs index 465c5331..ef13ff37 100644 --- a/rust/operator-binary/src/connect/executor.rs +++ b/rust/operator-binary/src/connect/executor.rs @@ -14,7 +14,7 @@ use stackable_operator::{ }, k8s_openapi::{ DeepMerge, - api::core::v1::{ConfigMap, EnvVar, PodTemplateSpec}, + api::core::v1::{ConfigMap, EnvVar, PodSecurityContext, PodTemplateSpec}, }, kube::{ResourceExt, runtime::reflector::ObjectRef}, product_logging::framework::calculate_log_volume_size_limit, @@ -169,7 +169,20 @@ pub fn executor_pod_template( .context(AddS3VolumeSnafu)? .0, ) - .context(AddVolumeSnafu)?; + .context(AddVolumeSnafu)? + // This is needed for shared enpryDir volumes with other containers like the truststore + // init container. + .security_context(PodSecurityContext { + fs_group: Some(1000), + ..PodSecurityContext::default() + }); + + // S3: Add truststore init container for S3 endpoint communication with TLS. 
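+    // The init container bundles the PEM CA certificates mounted from the S3 SecretClasses into
+    // a PKCS#12 truststore; the JVM is pointed at it via the javax.net.ssl system properties
+    // emitted by extra_java_options().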
+ if let Some(truststore_init_container) = + resolved_s3_buckets.truststore_init_container(resolved_product_image.clone()) + { + template.add_init_container(truststore_init_container); + } if let Some(cm_name) = config.log_config_map() { container diff --git a/rust/operator-binary/src/connect/s3.rs b/rust/operator-binary/src/connect/s3.rs index 5e5b2313..745e5600 100644 --- a/rust/operator-binary/src/connect/s3.rs +++ b/rust/operator-binary/src/connect/s3.rs @@ -1,13 +1,21 @@ use std::collections::BTreeMap; +use indoc::formatdoc; use snafu::{OptionExt, ResultExt, Snafu}; use stackable_operator::{ - commons::secret_class::SecretClassVolumeError, + commons::{ + product_image_selection::ResolvedProductImage, secret_class::SecretClassVolumeError, + }, crd::s3::{self, v1alpha1::S3AccessStyle}, k8s_openapi::api::core::v1::{Volume, VolumeMount}, }; -use crate::connect::crd; +use crate::{ + connect::crd, + crd::constants::{ + STACKABLE_TLS_STORE_PASSWORD, STACKABLE_TRUST_STORE, STACKABLE_TRUST_STORE_NAME, + }, +}; #[derive(Snafu, Debug)] #[allow(clippy::enum_variant_names)] @@ -116,6 +124,11 @@ impl ResolvedS3Buckets { } } + // Add any extra properties needed for TLS configuration. + if let Some(extra_tls_properties) = self.extra_java_options() { + result.extend(extra_tls_properties); + } + Ok(result) } @@ -140,9 +153,138 @@ impl ResolvedS3Buckets { } } + // Always add the truststore volume and mount even if they are not populated by an init + // container. + volumes_by_name + .entry(STACKABLE_TRUST_STORE_NAME.to_string()) + .or_insert_with(|| self.truststore_volume()); + mounts_by_name + .entry(STACKABLE_TRUST_STORE_NAME.to_string()) + .or_insert_with(|| VolumeMount { + name: STACKABLE_TRUST_STORE_NAME.to_string(), + mount_path: STACKABLE_TRUST_STORE.to_string(), + ..Default::default() + }); + Ok(( volumes_by_name.into_values().collect(), mounts_by_name.into_values().collect(), )) } + + pub(crate) fn truststore_init_container( + &self, + image: ResolvedProductImage, + ) -> Option { + self.build_truststore_from_pem_ca_command().map(|command| { + stackable_operator::k8s_openapi::api::core::v1::Container { + name: "tls-truststore-init".to_string(), + image: Some(image.image.clone()), + command: Some(vec![ + "/bin/bash".to_string(), + "-x".to_string(), + "-euo".to_string(), + "pipefail".to_string(), + "-c".to_string(), + command, + ]), + volume_mounts: Some(self.truststore_init_container_volume_mounts()), + ..Default::default() + } + }) + } + + // The list of volume mounts for the init container that builds the truststore from PEM CA certs. + // It contains the output volume mount as well as all the input mounts for the PEM CA certs. + fn truststore_init_container_volume_mounts(&self) -> Vec { + let mut result = self.tls_volume_mounts(); + result.extend([VolumeMount { + name: STACKABLE_TRUST_STORE_NAME.to_string(), + mount_path: STACKABLE_TRUST_STORE.to_string(), + ..Default::default() + }]); + result + } + + // The list of volume mounts for TLS CAs. + fn tls_volume_mounts(&self) -> Vec { + self.s3_buckets + .iter() + .flat_map(|bucket| bucket.connection.tls.volumes_and_mounts()) + .flat_map(|(_, mount)| mount) + .collect() + } + + // The volume where the truststore is written to by the init container. + fn truststore_volume(&self) -> Volume { + Volume { + name: STACKABLE_TRUST_STORE_NAME.to_string(), + empty_dir: Some(Default::default()), + ..Default::default() + } + } + + fn secret_class_tls_ca_paths(&self) -> Vec { + // List of ca.crt files mounted by the secret classes. 
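+        // Connections that do not provide a CA certificate through a SecretClass contribute
+        // nothing here, so an empty list means no truststore needs to be built.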
+ self.s3_buckets + .iter() + .flat_map(|bucket| bucket.connection.tls.tls_ca_cert_mount_path()) + .collect() + } + + fn build_truststore_from_pem_ca_command(&self) -> Option { + let input_ca_paths: Vec = self.secret_class_tls_ca_paths(); + + let out_truststore_path = format!("{STACKABLE_TRUST_STORE}/truststore.p12"); + + if input_ca_paths.is_empty() { + None + } else { + Some(formatdoc! { " + cert-tools generate-pkcs12-truststore --out {out_truststore_path} --out-password {STACKABLE_TLS_STORE_PASSWORD} {pem_args}", + pem_args = input_ca_paths + .iter() + .map(|path| format!("--pem {path}")) + .collect::>() + .join(" ") + }) + } + } + + fn extra_java_options(&self) -> Option>> { + if self.build_truststore_from_pem_ca_command().is_some() { + let mut ssl_options = BTreeMap::new(); + ssl_options.insert( + "-Djavax.net.ssl.trustStore".to_string(), + format!("{STACKABLE_TRUST_STORE}/truststore.p12"), + ); + ssl_options.insert( + "-Djavax.net.ssl.trustStorePassword".to_string(), + STACKABLE_TLS_STORE_PASSWORD.to_string(), + ); + ssl_options.insert( + "-Djavax.net.ssl.trustStoreType".to_string(), + "pkcs12".to_string(), + ); + + let ssl_options_str = ssl_options + .into_iter() + .map(|(k, v)| format!("{k}={v}")) + .collect::>() + .join(" "); + + Some(BTreeMap::from([ + ( + "spark.driver.extraJavaOptions".to_string(), + Some(ssl_options_str.clone()), + ), + ( + "spark.executor.extraJavaOptions".to_string(), + Some(ssl_options_str), + ), + ])) + } else { + None + } + } } diff --git a/rust/operator-binary/src/connect/server.rs b/rust/operator-binary/src/connect/server.rs index 9e27e241..4c156b76 100644 --- a/rust/operator-binary/src/connect/server.rs +++ b/rust/operator-binary/src/connect/server.rs @@ -259,6 +259,8 @@ pub(crate) fn build_stateful_set( .build(), ) .context(AddVolumeSnafu)? + // This is needed for shared enpryDir volumes with other containers like the truststore + // init container. .security_context(PodSecurityContext { fs_group: Some(1000), ..PodSecurityContext::default() @@ -363,7 +365,7 @@ pub(crate) fn build_stateful_set( .context(BuildListenerVolumeSnafu)?, ]); - // Add any secret volumes needed for the configured S3 buckets + // S3: Add secret volumes needed for accessing S3 buckets. pb.add_volumes( resolved_s3_buckets .volumes_and_mounts() @@ -372,6 +374,13 @@ pub(crate) fn build_stateful_set( ) .context(AddVolumeSnafu)?; + // S3: Add truststore init container for S3 endpoint communication with TLS. + if let Some(truststore_init_container) = + resolved_s3_buckets.truststore_init_container(resolved_product_image.clone()) + { + pb.add_init_container(truststore_init_container); + } + // Merge user defined pod template if available let mut pod_template = pb.build_template(); if let Some(pod_overrides_spec) = scs diff --git a/tests/test-definition.yaml b/tests/test-definition.yaml index fb3e3e8e..f8662181 100644 --- a/tests/test-definition.yaml +++ b/tests/test-definition.yaml @@ -62,8 +62,7 @@ dimensions: - name: s3-use-tls values: - "false" - # TODO: re-enable when the spark-connect implementation works. 
- # - "true" + - "true" tests: - name: smoke dimensions: From 0daee5e52db3cbb6b67ff8f01a131c5aab2b2c32 Mon Sep 17 00:00:00 2001 From: Razvan-Daniel Mihai <84674+razvan@users.noreply.github.com> Date: Fri, 6 Feb 2026 17:23:33 +0100 Subject: [PATCH 08/11] cleanup --- .../kuttl/spark-connect/20-run-connect-client.yaml.j2 | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/tests/templates/kuttl/spark-connect/20-run-connect-client.yaml.j2 b/tests/templates/kuttl/spark-connect/20-run-connect-client.yaml.j2 index 63a2dd3b..6d2b5f15 100644 --- a/tests/templates/kuttl/spark-connect/20-run-connect-client.yaml.j2 +++ b/tests/templates/kuttl/spark-connect/20-run-connect-client.yaml.j2 @@ -76,17 +76,6 @@ spec: requests: cpu: 200m memory: 128Mi - # env: - # - name: AWS_ACCESS_KEY_ID - # valueFrom: - # secretKeyRef: - # name: minio-credentials - # key: accessKey - # - name: AWS_SECRET_ACCESS_KEY - # valueFrom: - # secretKeyRef: - # name: minio-credentials - # key: secretKey volumeMounts: - name: spark-connect-client mountPath: /app From 4253fe99da27805b2fb8a32705cf217ec892f992 Mon Sep 17 00:00:00 2001 From: Razvan-Daniel Mihai <84674+razvan@users.noreply.github.com> Date: Fri, 6 Feb 2026 17:25:08 +0100 Subject: [PATCH 09/11] update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6a157f1f..da965908 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ All notable changes to this project will be documented in this file. This has the effect that applications where the `spark-submit` Pod fails are not resubmitted. Previously, Jobs were retried at most 6 times by default ([#647]). - Support for Spark `3.5.8` ([#650]). +- First class support for S3 buckets on Spark connect clusters ([#652]). ### Changed @@ -34,6 +35,7 @@ All notable changes to this project will be documented in this file. 
[#649]: https://github.com/stackabletech/spark-k8s-operator/pull/649 [#650]: https://github.com/stackabletech/spark-k8s-operator/pull/650 [#651]: https://github.com/stackabletech/spark-k8s-operator/pull/651 +[#652]: https://github.com/stackabletech/spark-k8s-operator/pull/652 ## [25.11.0] - 2025-11-07 From 5f144cfbe6373951d3865e31a211b268a65c76c6 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 6 Feb 2026 17:58:20 +0100 Subject: [PATCH 10/11] Update time to fix RUSTSEC-2026-0009 --- Cargo.lock | 40 +++++++++++++++++++++++++--------------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e5e2ccca..ee6ec417 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -566,9 +566,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.4.0" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" +checksum = "ececcb659e7ba858fb4f10388c250a7252eb0a27373f1a72b8748afdd248e587" dependencies = [ "powerfmt", ] @@ -1633,9 +1633,9 @@ dependencies = [ [[package]] name = "num-conv" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" +checksum = "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050" [[package]] name = "num-traits" @@ -2324,10 +2324,11 @@ checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" [[package]] name = "serde" -version = "1.0.219" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" dependencies = [ + "serde_core", "serde_derive", ] @@ -2341,11 +2342,20 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + [[package]] name = "serde_derive" -version = "1.0.219" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", @@ -2785,30 +2795,30 @@ dependencies = [ [[package]] name = "time" -version = "0.3.41" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" +checksum = "743bd48c283afc0388f9b8827b976905fb217ad9e647fae3a379a9283c4def2c" dependencies = [ "deranged", "itoa", "num-conv", "powerfmt", - "serde", + "serde_core", "time-core", "time-macros", ] [[package]] name = "time-core" -version = "0.1.4" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" +checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca" [[package]] name = "time-macros" -version = "0.2.22" +version = "0.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" +checksum = "2e70e4c5a0e0a8a4823ad65dfe1a6930e4f4d756dcd9dd7939022b5e8c501215" 
dependencies = [ "num-conv", "time-core", From c4386c08c7c8acba61da092285b9bed9ec757c35 Mon Sep 17 00:00:00 2001 From: Sebastian Bernauer Date: Fri, 6 Feb 2026 20:24:05 +0100 Subject: [PATCH 11/11] update nix --- Cargo.nix | 116 ++++++++++++++++++++++++++++++++---------------------- 1 file changed, 68 insertions(+), 48 deletions(-) diff --git a/Cargo.nix b/Cargo.nix index fc4f195a..a4046946 100644 --- a/Cargo.nix +++ b/Cargo.nix @@ -1728,9 +1728,9 @@ rec { }; "deranged" = rec { crateName = "deranged"; - version = "0.4.0"; + version = "0.5.5"; edition = "2021"; - sha256 = "13h6skwk411wzhf1l9l7d3yz5y6vg9d7s3dwhhb4a942r88nm7lw"; + sha256 = "11z5939gv2klp1r1lgrp4w5fnlkj18jqqf0h9zxmia3vkrjwpv7c"; authors = [ "Jacob Pratt " ]; @@ -1743,7 +1743,6 @@ rec { } ]; features = { - "default" = [ "std" ]; "macros" = [ "dep:deranged-macros" ]; "num" = [ "dep:num-traits" ]; "powerfmt" = [ "dep:powerfmt" ]; @@ -1751,10 +1750,9 @@ rec { "rand" = [ "rand08" "rand09" ]; "rand08" = [ "dep:rand08" ]; "rand09" = [ "dep:rand09" ]; - "serde" = [ "dep:serde" ]; - "std" = [ "alloc" ]; + "serde" = [ "dep:serde_core" ]; }; - resolvedDefaultFeatures = [ "alloc" "powerfmt" "std" ]; + resolvedDefaultFeatures = [ "default" "powerfmt" ]; }; "derive_more" = rec { crateName = "derive_more"; @@ -5350,9 +5348,9 @@ rec { }; "num-conv" = rec { crateName = "num-conv"; - version = "0.1.0"; + version = "0.2.0"; edition = "2021"; - sha256 = "1ndiyg82q73783jq18isi71a7mjh56wxrk52rlvyx0mi5z9ibmai"; + sha256 = "0l4hj7lp8zbb9am4j3p7vlcv47y9bbazinvnxx9zjhiwkibyr5yg"; libName = "num_conv"; authors = [ "Jacob Pratt " @@ -7665,35 +7663,34 @@ rec { }; "serde" = rec { crateName = "serde"; - version = "1.0.219"; - edition = "2018"; - sha256 = "1dl6nyxnsi82a197sd752128a4avm6mxnscywas1jq30srp2q3jz"; + version = "1.0.228"; + edition = "2021"; + sha256 = "17mf4hhjxv5m90g42wmlbc61hdhlm6j9hwfkpcnd72rpgzm993ls"; authors = [ "Erick Tryzelaar " "David Tolnay " ]; dependencies = [ { - name = "serde_derive"; - packageId = "serde_derive"; - optional = true; - } - { - name = "serde_derive"; - packageId = "serde_derive"; - target = { target, features }: false; + name = "serde_core"; + packageId = "serde_core"; + usesDefaultFeatures = false; + features = [ "result" ]; } - ]; - devDependencies = [ { name = "serde_derive"; packageId = "serde_derive"; + optional = true; } ]; features = { + "alloc" = [ "serde_core/alloc" ]; "default" = [ "std" ]; "derive" = [ "serde_derive" ]; + "rc" = [ "serde_core/rc" ]; "serde_derive" = [ "dep:serde_derive" ]; + "std" = [ "serde_core/std" ]; + "unstable" = [ "serde_core/unstable" ]; }; resolvedDefaultFeatures = [ "alloc" "default" "derive" "serde_derive" "std" ]; }; @@ -7718,11 +7715,38 @@ rec { ]; }; + "serde_core" = rec { + crateName = "serde_core"; + version = "1.0.228"; + edition = "2021"; + sha256 = "1bb7id2xwx8izq50098s5j2sqrrvk31jbbrjqygyan6ask3qbls1"; + authors = [ + "Erick Tryzelaar " + "David Tolnay " + ]; + dependencies = [ + { + name = "serde_derive"; + packageId = "serde_derive"; + target = { target, features }: false; + } + ]; + devDependencies = [ + { + name = "serde_derive"; + packageId = "serde_derive"; + } + ]; + features = { + "default" = [ "std" "result" ]; + }; + resolvedDefaultFeatures = [ "alloc" "result" "std" ]; + }; "serde_derive" = rec { crateName = "serde_derive"; - version = "1.0.219"; - edition = "2015"; - sha256 = "001azhjmj7ya52pmfiw4ppxm16nd44y15j2pf5gkcwrcgz7pc0jv"; + version = "1.0.228"; + edition = "2021"; + sha256 = "0y8xm7fvmr2kjcd029g9fijpndh8csv5m20g4bd76w8qschg4h6m"; procMacro = true; 
authors = [ "Erick Tryzelaar " @@ -9117,9 +9141,9 @@ rec { }; "time" = rec { crateName = "time"; - version = "0.3.41"; - edition = "2021"; - sha256 = "0h0cpiyya8cjlrh00d2r72bmgg4lsdcncs76qpwy0rn2kghijxla"; + version = "0.3.47"; + edition = "2024"; + sha256 = "0b7g9ly2iabrlgizliz6v5x23yq5d6bpp0mqz6407z1s526d8fvl"; authors = [ "Jacob Pratt " "Time contributors" @@ -9128,7 +9152,6 @@ rec { { name = "deranged"; packageId = "deranged"; - usesDefaultFeatures = false; features = [ "powerfmt" ]; } { @@ -9146,8 +9169,8 @@ rec { usesDefaultFeatures = false; } { - name = "serde"; - packageId = "serde"; + name = "serde_core"; + packageId = "serde_core"; optional = true; usesDefaultFeatures = false; } @@ -9166,52 +9189,49 @@ rec { name = "num-conv"; packageId = "num-conv"; } - { - name = "serde"; - packageId = "serde"; - usesDefaultFeatures = false; - features = [ "derive" ]; - } { name = "time-macros"; packageId = "time-macros"; } ]; features = { - "alloc" = [ "serde?/alloc" ]; + "alloc" = [ "serde_core?/alloc" ]; "default" = [ "std" ]; "formatting" = [ "dep:itoa" "std" "time-macros?/formatting" ]; - "large-dates" = [ "time-macros?/large-dates" ]; + "large-dates" = [ "time-core/large-dates" "time-macros?/large-dates" ]; "local-offset" = [ "std" "dep:libc" "dep:num_threads" ]; "macros" = [ "dep:time-macros" ]; "parsing" = [ "time-macros?/parsing" ]; "quickcheck" = [ "dep:quickcheck" "alloc" "deranged/quickcheck" ]; - "rand" = [ "dep:rand" "deranged/rand" ]; - "serde" = [ "dep:serde" "time-macros?/serde" "deranged/serde" ]; + "rand" = [ "rand08" "rand09" ]; + "rand08" = [ "dep:rand08" "deranged/rand08" ]; + "rand09" = [ "dep:rand09" "deranged/rand09" ]; + "serde" = [ "dep:serde_core" "time-macros?/serde" "deranged/serde" ]; "serde-human-readable" = [ "serde" "formatting" "parsing" ]; "serde-well-known" = [ "serde" "formatting" "parsing" ]; - "std" = [ "alloc" "deranged/std" ]; + "std" = [ "alloc" ]; "wasm-bindgen" = [ "dep:js-sys" ]; }; resolvedDefaultFeatures = [ "alloc" "default" "formatting" "parsing" "std" ]; }; "time-core" = rec { crateName = "time-core"; - version = "0.1.4"; - edition = "2021"; - sha256 = "0z5h9fknvdvbs2k2s1chpi3ab3jvgkfhdnqwrvixjngm263s7sf9"; + version = "0.1.8"; + edition = "2024"; + sha256 = "1jidl426mw48i7hjj4hs9vxgd9lwqq4vyalm4q8d7y4iwz7y353n"; libName = "time_core"; authors = [ "Jacob Pratt " "Time contributors" ]; - + features = { + }; }; "time-macros" = rec { crateName = "time-macros"; - version = "0.2.22"; - edition = "2021"; - sha256 = "0jcaxpw220han2bzbrdlpqhy1s5k9i8ri3lw6n5zv4zcja9p69im"; + version = "0.2.27"; + edition = "2024"; + sha256 = "058ja265waq275wxvnfwavbz9r1hd4dgwpfn7a1a9a70l32y8w1f"; procMacro = true; libName = "time_macros"; authors = [