From 5cdb10b2e6d3ca592299fb68938ca84d5c93260f Mon Sep 17 00:00:00 2001 From: Hans Arnholm Date: Tue, 5 May 2026 13:02:10 -0700 Subject: [PATCH 01/11] fix(storage): build dataset URLs against override_url for S3-compatible backends MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit S3Backend.parse_uri_to_link hardcoded the AWS pattern 'https://<bucket>.s3.<region>.amazonaws.com/...' regardless of credential override_url. The UI consumes these URLs to fetch dataset content; for a CAIOS-backed bucket this produced 'osmo-on-cw-dev-harnholm-datasets.s3.us-east-14a.amazonaws.com', which ENOTFOUNDs in DNS. Thread the credential's override_url and addressing_style through parse_uri_to_link. When override_url is set, S3Backend builds a URL against that host (virtual-host by default; addressing_style='path' yields path-style for localstack/MinIO without wildcard DNS). AWS S3 behavior (no override_url) is unchanged. Other backends accept the new kwargs but ignore them. Updated callsites: - data_service.py: dataset listing payload now respects bucket config's default_credential.override_url. This is the immediate UI fix. - dataset/uploading.py (×2) + migrating.py: new uploads/migrations record the correct URL in the manifest. Existing manifest entries persisted with AWS-pattern URLs are not migrated (separate concern). 
Co-Authored-By: Claude Opus 4.7 (1M context) --- src/lib/data/dataset/migrating.py | 7 +- src/lib/data/dataset/uploading.py | 9 ++- src/lib/data/storage/backends/backends.py | 65 +++++++++++++++---- src/lib/data/storage/backends/common.py | 17 ++++- .../storage/backends/tests/test_backends.py | 39 +++++++++++ src/service/core/data/data_service.py | 20 ++++-- 6 files changed, 139 insertions(+), 18 deletions(-) diff --git a/src/lib/data/dataset/migrating.py b/src/lib/data/dataset/migrating.py index 50cfbbee2..9909ce567 100644 --- a/src/lib/data/dataset/migrating.py +++ b/src/lib/data/dataset/migrating.py @@ -150,6 +150,7 @@ def _dataset_migrate_worker( migrate_entry: DatasetMigrateEntry = worker_input.entry destination_backend = migrate_entry.destination_backend destination_region = migrate_entry.destination_region + destination_data_cred = destination_backend.resolved_data_credential def _callback( copy_input: copying.CopyWorkerInput, @@ -160,7 +161,11 @@ def _callback( relative_path=migrate_entry.relative_path, storage_path=os.path.join(destination_backend.uri, migrate_entry.source_checksum), url=os.path.join( - destination_backend.parse_uri_to_link(destination_region), + destination_backend.parse_uri_to_link( + destination_region, + override_url=destination_data_cred.override_url, + addressing_style=destination_data_cred.addressing_style, + ), migrate_entry.source_checksum, ), size=migrate_entry.size, diff --git a/src/lib/data/dataset/uploading.py b/src/lib/data/dataset/uploading.py index 91ce4a19b..45bc49335 100644 --- a/src/lib/data/dataset/uploading.py +++ b/src/lib/data/dataset/uploading.py @@ -146,6 +146,8 @@ def dataset_upload_remote_file_entry_generator( data_cred = storage_client.data_credential url_base = storage_backend.parse_uri_to_link( region=storage_backend.region(data_cred), + override_url=data_cred.override_url, + addressing_style=data_cred.addressing_style, ) # Iterate over the objects in the remote path. 
@@ -282,6 +284,7 @@ def _write_to_manifest_cache( etag = utils_common.etag_checksum(upload_entry.source) destination: storage.StorageBackend = upload_entry.destination + destination_data_cred = destination.resolved_data_credential def _callback( upload_input: uploading.UploadWorkerInput, @@ -293,7 +296,11 @@ def _callback( relative_path=upload_entry.relative_path, storage_path=os.path.join(destination.uri, etag), url=os.path.join( - destination.parse_uri_to_link(upload_entry.destination_region), + destination.parse_uri_to_link( + upload_entry.destination_region, + override_url=destination_data_cred.override_url, + addressing_style=destination_data_cred.addressing_style, + ), etag, ), size=upload_entry.size, diff --git a/src/lib/data/storage/backends/backends.py b/src/lib/data/storage/backends/backends.py index f1d0901ac..55d592b77 100644 --- a/src/lib/data/storage/backends/backends.py +++ b/src/lib/data/storage/backends/backends.py @@ -296,10 +296,16 @@ def container_uri(self) -> str: return f'{self.profile}/{self.container}' @override - def parse_uri_to_link(self, region: str) -> str: + def parse_uri_to_link( + self, + region: str, + *, + override_url: str | None = None, + addressing_style: str | None = None, + ) -> str: # pylint: disable=unused-argument """ - Returns the https link corresponding to the uri + Returns the https link corresponding to the uri. """ return f'https://{self.netloc}/v1/{self.namespace}/{self.container}/{self.path}'.rstrip('/') @@ -456,10 +462,29 @@ def container_uri(self) -> str: return f'{self.profile}' @override - def parse_uri_to_link(self, region: str) -> str: - """ - Returns the https link corresponding to the uri + def parse_uri_to_link( + self, + region: str, + *, + override_url: str | None = None, + addressing_style: str | None = None, + ) -> str: """ + Returns the https link corresponding to the uri. + + When the credential has an override_url (CAIOS, R2, Wasabi, MinIO, + etc.) we build a URL against that host. 
The default is virtual-host + addressing because that's what modern S3-compatibles expect; pass + addressing_style='path' for legacy bucket names or path-style-only + deployments. + """ + if override_url: + parsed = parse.urlparse(override_url) + scheme = parsed.scheme or 'https' + host = parsed.netloc or parsed.path + if addressing_style == 'path': + return f'{scheme}://{host}/{self.container}/{self.path}'.rstrip('/') + return f'{scheme}://{self.container}.{host}/{self.path}'.rstrip('/') return f'https://{self.container}.s3.{region}.amazonaws.com/{self.path}'.rstrip('/') @override @@ -658,10 +683,16 @@ def container_uri(self) -> str: return f'{self.profile}' @override - def parse_uri_to_link(self, region: str) -> str: + def parse_uri_to_link( + self, + region: str, + *, + override_url: str | None = None, + addressing_style: str | None = None, + ) -> str: # pylint: disable=unused-argument """ - Returns the https link corresponding to the uri + Returns the https link corresponding to the uri. """ return ( f'https://storage.googleapis.com/storage/v1/b/{self.container}/o/{self.path}' @@ -778,10 +809,16 @@ def container_uri(self) -> str: return f'{self.profile}' @override - def parse_uri_to_link(self, region: str) -> str: + def parse_uri_to_link( + self, + region: str, + *, + override_url: str | None = None, + addressing_style: str | None = None, + ) -> str: # pylint: disable=unused-argument """ - Returns the https link corresponding to the uri + Returns the https link corresponding to the uri. 
""" return f'https://{self.container}.{self.netloc}/{self.path}'.rstrip('/') @@ -893,10 +930,16 @@ def container_uri(self) -> str: return f'{self.profile}/{self.container}' @override - def parse_uri_to_link(self, region: str) -> str: + def parse_uri_to_link( + self, + region: str, + *, + override_url: str | None = None, + addressing_style: str | None = None, + ) -> str: # pylint: disable=unused-argument """ - Returns the https link corresponding to the uri + Returns the https link corresponding to the uri. """ return f'{self.endpoint}/{self.container}/{self.path}'.rstrip('/') diff --git a/src/lib/data/storage/backends/common.py b/src/lib/data/storage/backends/common.py index 405c65757..bfafdd205 100644 --- a/src/lib/data/storage/backends/common.py +++ b/src/lib/data/storage/backends/common.py @@ -185,9 +185,24 @@ def container_uri(self) -> str: pass @abc.abstractmethod - def parse_uri_to_link(self, region: str) -> str: + def parse_uri_to_link( + self, + region: str, + *, + override_url: str | None = None, + addressing_style: str | None = None, + ) -> str: """ Returns the https link corresponding to the uri. + + Args: + region: Region used by AWS-pattern URLs when no override is given. + override_url: Custom endpoint (S3-compatible providers). When set, + S3 backends build a URL against this host rather than AWS S3. + Other backends ignore it. + addressing_style: 'virtual' or 'path'. Used by S3 backends with a + custom override_url to select host-based vs path-based URLs. + Defaults to virtual (the form modern S3-compatibles expect). 
""" pass diff --git a/src/lib/data/storage/backends/tests/test_backends.py b/src/lib/data/storage/backends/tests/test_backends.py index 1f0aaf089..88ec88404 100644 --- a/src/lib/data/storage/backends/tests/test_backends.py +++ b/src/lib/data/storage/backends/tests/test_backends.py @@ -563,6 +563,45 @@ def test_env_override_invalid_raises(self): self.assertIn("'virtua'", str(ctx.exception)) +class S3BackendParseUriToLinkTest(unittest.TestCase): + """Tests for S3Backend.parse_uri_to_link override-aware URL building.""" + + def _backend(self, uri: str = 's3://my-bucket/foo/bar') -> backends.S3Backend: + return cast(backends.S3Backend, backends.construct_storage_backend(uri)) + + def test_no_override_uses_aws_pattern(self): + """Without an override_url, fall back to the AWS-host pattern (preserved).""" + link = self._backend().parse_uri_to_link('us-east-1') + self.assertEqual(link, 'https://my-bucket.s3.us-east-1.amazonaws.com/foo/bar') + + def test_override_url_uses_virtual_host(self): + """With an override_url, build a virtual-host URL against the custom endpoint + (CAIOS, R2, Wasabi, MinIO with wildcard DNS).""" + link = self._backend().parse_uri_to_link( + 'US-EAST-14A', + override_url='https://cwobject.com', + ) + self.assertEqual(link, 'https://my-bucket.cwobject.com/foo/bar') + + def test_override_url_path_style(self): + """addressing_style='path' yields a path-style URL — needed for + localstack/MinIO setups without wildcard DNS.""" + link = self._backend().parse_uri_to_link( + 'us-east-1', + override_url='http://localstack-s3.osmo:4566', + addressing_style='path', + ) + self.assertEqual(link, 'http://localstack-s3.osmo:4566/my-bucket/foo/bar') + + def test_override_url_preserves_scheme(self): + """Scheme of the override is preserved in the link.""" + link = self._backend().parse_uri_to_link( + 'us-east-1', + override_url='http://minio.local:9000', + ) + self.assertEqual(link, 'http://my-bucket.minio.local:9000/foo/bar') + + class 
S3BackendRegionTest(unittest.TestCase): """Tests for S3Backend.region() endpoint routing.""" diff --git a/src/service/core/data/data_service.py b/src/service/core/data/data_service.py index b89d64e16..64a8bd652 100755 --- a/src/service/core/data/data_service.py +++ b/src/service/core/data/data_service.py @@ -121,14 +121,20 @@ def get_collection_info(postgres: connectors.PostgresConnector, dataset_rows = get_collection_datasets(postgres, bucket, name) bucket_config = postgres.get_dataset_configs().get_bucket_config(bucket) + default_cred = bucket_config.default_credential + override_url = default_cred.override_url if default_cred else None + addressing_style = default_cred.addressing_style if default_cred else None rows: List[objects.DataInfoCollectionEntry] = [] for row in dataset_rows: rows.append(objects.DataInfoCollectionEntry( name=row.name, version=row.version_id, - location=storage.construct_storage_backend(row.location)\ - .parse_uri_to_link(bucket_config.region), + location=storage.construct_storage_backend(row.location).parse_uri_to_link( + bucket_config.region, + override_url=override_url, + addressing_style=addressing_style, + ), uri=row.location, hash_location=row.hash_location, size=row.size)) @@ -177,6 +183,9 @@ def get_dataset_info(postgres: connectors.PostgresConnector, f'any entry fitting the parameters in bucket {bucket}.') bucket_config = postgres.get_dataset_configs().get_bucket_config(bucket) + default_cred = bucket_config.default_credential + override_url = default_cred.override_url if default_cred else None + addressing_style = default_cred.addressing_style if default_cred else None rows: List[objects.DataInfoDatasetEntry] = [] for row in dataset_rows: @@ -199,8 +208,11 @@ def get_dataset_info(postgres: connectors.PostgresConnector, last_used=row.last_used.replace(microsecond=0), size=row.size if row.size else 0, checksum=row.checksum if row.checksum else '', - location=storage.construct_storage_backend(row.location)\ - 
.parse_uri_to_link(bucket_config.region), + location=storage.construct_storage_backend(row.location).parse_uri_to_link( + bucket_config.region, + override_url=override_url, + addressing_style=addressing_style, + ), uri=row.location, metadata=row.metadata, tags=[element.tag for element in tags], From e47af4e9398bae2a257020b6129ed80374fcd720 Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Thu, 2 Apr 2026 23:36:56 +0000 Subject: [PATCH 02/11] fix: route dataset file browser and preview through service to support private buckets (#793) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The UI's fetchManifest and file preview proxy performed unsigned fetch() against S3 HTTPS URLs, which fails with 403 on private buckets. Added two service-side proxy endpoints: - GET /{bucket}/dataset/{name}/manifest — reads manifest JSON from storage using bucket credentials (supports S3, GCS, Azure, Swift, TOS) - GET /{bucket}/dataset/{name}/file-content — streams individual file content with storage_path validation against dataset container Updated UI to call these endpoints through the existing /api catch-all proxy instead of direct unsigned fetch. Removed unused fetchManifest server action files. 
--- src/service/core/data/data_service.py | 60 ++++++++++++ src/ui/next.config.ts | 3 - .../dataset/file/route.impl.production.ts | 95 +++++++++++++------ .../components/dataset-detail-content.tsx | 16 +++- .../detail/components/file-preview-panel.tsx | 67 +++++++++---- src/ui/src/lib/api/adapter/datasets-hooks.ts | 7 +- src/ui/src/lib/api/adapter/datasets.ts | 21 +++- .../api/server/dataset-actions.production.ts | 31 ------ src/ui/src/lib/api/server/dataset-actions.ts | 59 ------------ src/ui/src/mocks/handlers.ts | 14 +++ 10 files changed, 224 insertions(+), 149 deletions(-) delete mode 100644 src/ui/src/lib/api/server/dataset-actions.production.ts delete mode 100644 src/ui/src/lib/api/server/dataset-actions.ts diff --git a/src/service/core/data/data_service.py b/src/service/core/data/data_service.py index 64a8bd652..7001b0fcc 100755 --- a/src/service/core/data/data_service.py +++ b/src/service/core/data/data_service.py @@ -19,6 +19,7 @@ import base64 import datetime import json +import mimetypes import shlex from typing import Any, Dict, List, Sequence import uuid @@ -1005,6 +1006,65 @@ def get_info( versions=rows) +@router.get('/{bucket}/dataset/{name}/manifest') +def get_manifest( + bucket: objects.DatasetPattern, + name: objects.DatasetPattern, + version: str = fastapi.Query(...), +) -> List: + """ This api returns the manifest for a dataset version. 
""" + postgres = connectors.PostgresConnector.get_instance() + dataset_info = get_dataset(postgres, bucket=bucket, name=name) + + fetch_command = ''' + SELECT location FROM dataset_version + WHERE dataset_id = %s AND version_id = %s AND status = %s; + ''' + rows = postgres.execute_fetch_command( + fetch_command, (dataset_info.id, version, objects.DatasetStatus.READY.name)) + if not rows: + raise osmo_errors.OSMODatabaseError( + f'Version {version} not found for dataset {name} in bucket {bucket}.') + + bucket_config = postgres.get_dataset_configs().get_bucket_config(bucket) + client = storage.Client.create( + storage_uri=rows[0].location, + data_credential=bucket_config.default_credential, + ) + manifest_content = client.get_object_stream(as_io=True).read() + return json.loads(manifest_content) + + +@router.get('/{bucket}/dataset/{name}/file-content') +def get_file_content( + bucket: objects.DatasetPattern, + name: objects.DatasetPattern, + storage_path: str = fastapi.Query(...), +) -> fastapi.responses.StreamingResponse: + """ This api streams file content from storage for the dataset file preview. 
""" + postgres = connectors.PostgresConnector.get_instance() + dataset_info = get_dataset(postgres, bucket=bucket, name=name) + + # Validate that the storage path belongs to this dataset's storage prefix + requested_backend = storage.construct_storage_backend(storage_path) + dataset_backend = storage.construct_storage_backend(dataset_info.hash_location) + if requested_backend.container != dataset_backend.container: + raise osmo_errors.OSMOUserError( + 'Storage path does not belong to this dataset.') + + bucket_config = postgres.get_dataset_configs().get_bucket_config(bucket) + client = storage.Client.create( + storage_uri=storage_path, + data_credential=bucket_config.default_credential, + ) + + content_type = mimetypes.guess_type(storage_path)[0] or 'application/octet-stream' + return fastapi.responses.StreamingResponse( + client.get_object_stream(), + media_type=content_type, + ) + + @router.get('/list_dataset', response_model=objects.DataListResponse, deprecated=True) def list_dataset_from_bucket(name: objects.DatasetPattern | None = None, user: List[str] | None = fastapi.Query(default = None), diff --git a/src/ui/next.config.ts b/src/ui/next.config.ts index d65685c4e..32732d8d5 100644 --- a/src/ui/next.config.ts +++ b/src/ui/next.config.ts @@ -198,9 +198,6 @@ const nextConfig: NextConfig = { // This allows Turbopack aliasing to work (aliases work for imports, not file discovery) "@/app/api/[...path]/route.impl": "@/app/api/[...path]/route.impl.production", - // Dataset manifest server action - alias to production version (zero mock code) - "@/lib/api/server/dataset-actions": "@/lib/api/server/dataset-actions.production", - // Dataset file proxy route - alias to production version (zero mock code) "@/app/proxy/dataset/file/route.impl": "@/app/proxy/dataset/file/route.impl.production", diff --git a/src/ui/src/app/proxy/dataset/file/route.impl.production.ts b/src/ui/src/app/proxy/dataset/file/route.impl.production.ts index 17c7ef928..2c5e3c0f5 100644 --- 
a/src/ui/src/app/proxy/dataset/file/route.impl.production.ts +++ b/src/ui/src/app/proxy/dataset/file/route.impl.production.ts @@ -18,55 +18,96 @@ * Dataset File Proxy — Production Implementation * * Server-side proxy for fetching dataset files from storage URLs. - * Routes requests through the server to avoid CSP restrictions. * - * GET /proxy/dataset/file?url={encodedFileUrl} → streams file content - * HEAD /proxy/dataset/file?url={encodedFileUrl} → returns headers only + * When bucket/name/storagePath params are present, routes through the backend + * service's file-content endpoint (handles private buckets with credentials). + * Falls back to direct fetch for legacy callers that only provide a url param. + * + * GET /proxy/dataset/file?bucket=...&name=...&storagePath=... → service proxy + * GET /proxy/dataset/file?url={encodedFileUrl} → direct fetch (legacy) + * HEAD variants of the above → headers only */ +import type { NextRequest } from "next/server"; +import { getServerApiBaseUrl } from "@/lib/api/server/config"; +import { forwardAuthHeaders } from "@/lib/api/server/proxy-headers"; + const FORWARDED_HEADERS = ["content-type", "content-length", "last-modified", "etag", "cache-control"] as const; -function parseAndValidateUrl(request: Request): { url: string } | Response { +function forwardResponseHeaders(upstream: Response): Headers { + const headers = new Headers(); + for (const header of FORWARDED_HEADERS) { + const value = upstream.headers.get(header); + if (value) headers.set(header, value); + } + return headers; +} + +interface ServiceParams { + bucket: string; + name: string; + storagePath: string; +} + +interface LegacyParams { + url: string; +} + +function parseParams(request: Request): ServiceParams | LegacyParams | Response { const { searchParams } = new URL(request.url); - const url = searchParams.get("url"); - if (!url) { - return Response.json({ error: "url parameter is required" }, { status: 400 }); + const bucket = searchParams.get("bucket"); + 
const name = searchParams.get("name"); + const storagePath = searchParams.get("storagePath"); + + if (bucket && name && storagePath) { + return { bucket, name, storagePath }; } + // Legacy fallback: direct URL fetch (works for public buckets only) + const url = searchParams.get("url"); + if (!url) { + return Response.json({ error: "bucket/name/storagePath or url parameter is required" }, { status: 400 }); + } if (!url.startsWith("http://") && !url.startsWith("https://")) { return Response.json({ error: "Only http/https URLs are supported" }, { status: 400 }); } - return { url }; } -export async function GET(request: Request) { - const result = parseAndValidateUrl(request); - if (result instanceof Response) return result; - - const upstream = await fetch(result.url); +function isServiceParams(params: ServiceParams | LegacyParams): params is ServiceParams { + return "storagePath" in params; +} - const headers = new Headers(); - for (const header of FORWARDED_HEADERS) { - const value = upstream.headers.get(header); - if (value) headers.set(header, value); +async function fetchUpstream( + request: NextRequest, + params: ServiceParams | LegacyParams, + method: string, +): Promise { + if (isServiceParams(params)) { + const backendUrl = getServerApiBaseUrl(); + const query = new URLSearchParams({ storage_path: params.storagePath }); + const serviceUrl = `${backendUrl}/api/bucket/${encodeURIComponent(params.bucket)}/dataset/${encodeURIComponent(params.name)}/file-content?${query}`; + const headers = forwardAuthHeaders(request); + return fetch(serviceUrl, { method, headers }); } - - return new Response(upstream.body, { status: upstream.status, headers }); + return fetch(params.url, { method }); } -export async function HEAD(request: Request) { - const result = parseAndValidateUrl(request); +export async function GET(request: NextRequest) { + const result = parseParams(request); if (result instanceof Response) return result; - const upstream = await fetch(result.url, { 
method: "HEAD" }); + const upstream = await fetchUpstream(request, result, "GET"); + const headers = forwardResponseHeaders(upstream); + return new Response(upstream.body, { status: upstream.status, headers }); +} - const headers = new Headers(); - for (const header of FORWARDED_HEADERS) { - const value = upstream.headers.get(header); - if (value) headers.set(header, value); - } +export async function HEAD(request: NextRequest) { + const result = parseParams(request); + if (result instanceof Response) return result; + const upstream = await fetchUpstream(request, result, "HEAD"); + const headers = forwardResponseHeaders(upstream); return new Response(null, { status: upstream.status, headers }); } diff --git a/src/ui/src/features/datasets/detail/components/dataset-detail-content.tsx b/src/ui/src/features/datasets/detail/components/dataset-detail-content.tsx index 2e3c5e741..c05137402 100644 --- a/src/ui/src/features/datasets/detail/components/dataset-detail-content.tsx +++ b/src/ui/src/features/datasets/detail/components/dataset-detail-content.tsx @@ -176,6 +176,8 @@ export function DatasetDetailContent({ bucket, name }: Props) { const { versions, location, + resolvedName, + resolvedVersion, files: virtualFiles, memberSubPath, segmentLabels, @@ -184,6 +186,8 @@ export function DatasetDetailContent({ bucket, name }: Props) { return { versions: [], location: null as string | null, + resolvedName: name, + resolvedVersion: "", files: null as DatasetFile[] | null, memberSubPath: "", segmentLabels: {} as Record, @@ -197,6 +201,8 @@ export function DatasetDetailContent({ bucket, name }: Props) { return { versions: detail.versions, location: currentVersionData?.location ?? null, + resolvedName: name, + resolvedVersion: currentVersionData?.version ?? 
"", files: null, memberSubPath: path, segmentLabels: {}, @@ -221,6 +227,8 @@ export function DatasetDetailContent({ bucket, name }: Props) { return { versions: [], location: null, + resolvedName: name, + resolvedVersion: "", files: memberEntries, memberSubPath: "", segmentLabels: labels, @@ -234,11 +242,13 @@ export function DatasetDetailContent({ bucket, name }: Props) { return { versions: [], location: member?.location ?? null, + resolvedName: member?.name ?? name, + resolvedVersion: member?.version ?? "", files: null, memberSubPath: subPath, segmentLabels: labels, }; - }, [detail, version, path]); + }, [detail, version, path, name]); // ========================================================================== // File listing — fetch manifest for selected version/member @@ -249,7 +259,7 @@ export function DatasetDetailContent({ bucket, name }: Props) { isLoading: isFilesLoading, error: filesError, refetch: refetchFiles, - } = useDatasetFiles(location); + } = useDatasetFiles(bucket, resolvedName, resolvedVersion, location); // Normal (unfiltered) directory listing — used for FilterBar suggestions and as base view const normalFiles = useMemo( @@ -573,6 +583,8 @@ export function DatasetDetailContent({ bucket, name }: Props) { )} diff --git a/src/ui/src/features/datasets/detail/components/file-preview-panel.tsx b/src/ui/src/features/datasets/detail/components/file-preview-panel.tsx index 9274ecdef..90dd58e8e 100644 --- a/src/ui/src/features/datasets/detail/components/file-preview-panel.tsx +++ b/src/ui/src/features/datasets/detail/components/file-preview-panel.tsx @@ -24,7 +24,7 @@ * - image/* → via proxy * - video/* →