diff --git a/src/lib/data/dataset/migrating.py b/src/lib/data/dataset/migrating.py
index 50cfbbee2..9909ce567 100644
--- a/src/lib/data/dataset/migrating.py
+++ b/src/lib/data/dataset/migrating.py
@@ -150,6 +150,7 @@ def _dataset_migrate_worker(
     migrate_entry: DatasetMigrateEntry = worker_input.entry
     destination_backend = migrate_entry.destination_backend
     destination_region = migrate_entry.destination_region
+    destination_data_cred = destination_backend.resolved_data_credential
 
     def _callback(
         copy_input: copying.CopyWorkerInput,
@@ -160,7 +161,11 @@ def _callback(
             relative_path=migrate_entry.relative_path,
             storage_path=os.path.join(destination_backend.uri, migrate_entry.source_checksum),
             url=os.path.join(
-                destination_backend.parse_uri_to_link(destination_region),
+                destination_backend.parse_uri_to_link(
+                    destination_region,
+                    override_url=destination_data_cred.override_url,
+                    addressing_style=destination_data_cred.addressing_style,
+                ),
                 migrate_entry.source_checksum,
             ),
             size=migrate_entry.size,
diff --git a/src/lib/data/dataset/uploading.py b/src/lib/data/dataset/uploading.py
index 91ce4a19b..45bc49335 100644
--- a/src/lib/data/dataset/uploading.py
+++ b/src/lib/data/dataset/uploading.py
@@ -146,6 +146,8 @@ def dataset_upload_remote_file_entry_generator(
     data_cred = storage_client.data_credential
     url_base = storage_backend.parse_uri_to_link(
         region=storage_backend.region(data_cred),
+        override_url=data_cred.override_url,
+        addressing_style=data_cred.addressing_style,
     )
 
     # Iterate over the objects in the remote path.
@@ -282,6 +284,7 @@ def _write_to_manifest_cache(
     etag = utils_common.etag_checksum(upload_entry.source)
 
     destination: storage.StorageBackend = upload_entry.destination
+    destination_data_cred = destination.resolved_data_credential
 
     def _callback(
         upload_input: uploading.UploadWorkerInput,
@@ -293,7 +296,11 @@ def _callback(
             relative_path=upload_entry.relative_path,
             storage_path=os.path.join(destination.uri, etag),
             url=os.path.join(
-                destination.parse_uri_to_link(upload_entry.destination_region),
+                destination.parse_uri_to_link(
+                    upload_entry.destination_region,
+                    override_url=destination_data_cred.override_url,
+                    addressing_style=destination_data_cred.addressing_style,
+                ),
                 etag,
             ),
             size=upload_entry.size,
diff --git a/src/lib/data/storage/backends/backends.py b/src/lib/data/storage/backends/backends.py
index f1d0901ac..ebf90a5d9 100644
--- a/src/lib/data/storage/backends/backends.py
+++ b/src/lib/data/storage/backends/backends.py
@@ -296,10 +296,16 @@ def container_uri(self) -> str:
         return f'{self.profile}/{self.container}'
 
     @override
-    def parse_uri_to_link(self, region: str) -> str:
+    def parse_uri_to_link(
+        self,
+        region: str,
+        *,
+        override_url: str | None = None,
+        addressing_style: str | None = None,
+    ) -> str:  # pylint: disable=unused-argument
         """
-        Returns the https link corresponding to the uri
+        Returns the https link corresponding to the uri.
         """
         return f'https://{self.netloc}/v1/{self.namespace}/{self.container}/{self.path}'.rstrip('/')
@@ -456,10 +462,41 @@ def container_uri(self) -> str:
         return f'{self.profile}'
 
     @override
-    def parse_uri_to_link(self, region: str) -> str:
+    def parse_uri_to_link(
+        self,
+        region: str,
+        *,
+        override_url: str | None = None,
+        addressing_style: str | None = None,
+    ) -> str:
         """
-        Returns the https link corresponding to the uri
+        Returns the https link corresponding to the uri.
+
+        When the credential has an override_url (CAIOS, R2, Wasabi, MinIO,
+        etc.) we build a URL against that host. The default is virtual-host
+        addressing because that's what modern S3-compatibles expect; pass
+        addressing_style='path' for legacy bucket names or path-style-only
+        deployments.
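+
+        For example (mirroring the unit tests in test_backends.py), an
+        s3://my-bucket/foo/bar backend with override_url='https://cwobject.com'
+        resolves to 'https://my-bucket.cwobject.com/foo/bar', while
+        addressing_style='path' with override_url='http://localstack-s3.osmo:4566'
+        resolves to 'http://localstack-s3.osmo:4566/my-bucket/foo/bar'.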
+        """
+        if override_url:
+            parsed = parse.urlparse(override_url)
+            scheme = parsed.scheme or 'https'
+            if parsed.netloc:
+                host = parsed.netloc
+                base_path = parsed.path.rstrip('/')
+            else:
+                # No scheme prefix on override_url: urlparse drops the whole
+                # input into 'path'. Split off the host from any base path so
+                # reverse-proxied endpoints like 'gateway.example.com/s3' keep
+                # the '/s3' prefix in the resulting link.
+                bare = parsed.path.lstrip('/')
+                host, _, rest = bare.partition('/')
+                base_path = ('/' + rest).rstrip('/') if rest else ''
+            if addressing_style == 'path':
+                return (
+                    f'{scheme}://{host}{base_path}/{self.container}/{self.path}'.rstrip('/')
+                )
+            return f'{scheme}://{self.container}.{host}{base_path}/{self.path}'.rstrip('/')
         return f'https://{self.container}.s3.{region}.amazonaws.com/{self.path}'.rstrip('/')
 
     @override
@@ -658,10 +695,16 @@ def container_uri(self) -> str:
         return f'{self.profile}'
 
     @override
-    def parse_uri_to_link(self, region: str) -> str:
+    def parse_uri_to_link(
+        self,
+        region: str,
+        *,
+        override_url: str | None = None,
+        addressing_style: str | None = None,
+    ) -> str:  # pylint: disable=unused-argument
         """
-        Returns the https link corresponding to the uri
+        Returns the https link corresponding to the uri.
         """
         return (
             f'https://storage.googleapis.com/storage/v1/b/{self.container}/o/{self.path}'
@@ -778,10 +821,16 @@ def container_uri(self) -> str:
         return f'{self.profile}'
 
     @override
-    def parse_uri_to_link(self, region: str) -> str:
+    def parse_uri_to_link(
+        self,
+        region: str,
+        *,
+        override_url: str | None = None,
+        addressing_style: str | None = None,
+    ) -> str:  # pylint: disable=unused-argument
         """
-        Returns the https link corresponding to the uri
+        Returns the https link corresponding to the uri.
         """
         return f'https://{self.container}.{self.netloc}/{self.path}'.rstrip('/')
@@ -893,10 +942,16 @@ def container_uri(self) -> str:
         return f'{self.profile}/{self.container}'
 
     @override
-    def parse_uri_to_link(self, region: str) -> str:
+    def parse_uri_to_link(
+        self,
+        region: str,
+        *,
+        override_url: str | None = None,
+        addressing_style: str | None = None,
+    ) -> str:  # pylint: disable=unused-argument
         """
-        Returns the https link corresponding to the uri
+        Returns the https link corresponding to the uri.
         """
         return f'{self.endpoint}/{self.container}/{self.path}'.rstrip('/')
diff --git a/src/lib/data/storage/backends/common.py b/src/lib/data/storage/backends/common.py
index 405c65757..bfafdd205 100644
--- a/src/lib/data/storage/backends/common.py
+++ b/src/lib/data/storage/backends/common.py
@@ -185,9 +185,24 @@ def container_uri(self) -> str:
         pass
 
     @abc.abstractmethod
-    def parse_uri_to_link(self, region: str) -> str:
+    def parse_uri_to_link(
+        self,
+        region: str,
+        *,
+        override_url: str | None = None,
+        addressing_style: str | None = None,
+    ) -> str:
         """
         Returns the https link corresponding to the uri.
+
+        Args:
+            region: Region used by AWS-pattern URLs when no override is given.
+            override_url: Custom endpoint (S3-compatible providers). When set,
+                S3 backends build a URL against this host rather than AWS S3.
+                Other backends ignore it.
+            addressing_style: 'virtual' or 'path'. Used by S3 backends with a
+                custom override_url to select host-based vs path-based URLs.
+                Defaults to virtual (the form modern S3-compatibles expect).
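+
+        For example, the S3 backend for s3://my-bucket/foo/bar returns
+        'https://my-bucket.s3.us-east-1.amazonaws.com/foo/bar' when called
+        with region='us-east-1' and no override (see test_backends.py).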
""" pass diff --git a/src/lib/data/storage/backends/tests/test_backends.py b/src/lib/data/storage/backends/tests/test_backends.py index 1f0aaf089..12b6ff14b 100644 --- a/src/lib/data/storage/backends/tests/test_backends.py +++ b/src/lib/data/storage/backends/tests/test_backends.py @@ -563,6 +563,62 @@ def test_env_override_invalid_raises(self): self.assertIn("'virtua'", str(ctx.exception)) +class S3BackendParseUriToLinkTest(unittest.TestCase): + """Tests for S3Backend.parse_uri_to_link override-aware URL building.""" + + def _backend(self, uri: str = 's3://my-bucket/foo/bar') -> backends.S3Backend: + return cast(backends.S3Backend, backends.construct_storage_backend(uri)) + + def test_no_override_uses_aws_pattern(self): + """Without an override_url, fall back to the AWS-host pattern (preserved).""" + link = self._backend().parse_uri_to_link('us-east-1') + self.assertEqual(link, 'https://my-bucket.s3.us-east-1.amazonaws.com/foo/bar') + + def test_override_url_uses_virtual_host(self): + """With an override_url, build a virtual-host URL against the custom endpoint + (CAIOS, R2, Wasabi, MinIO with wildcard DNS).""" + link = self._backend().parse_uri_to_link( + 'US-EAST-14A', + override_url='https://cwobject.com', + ) + self.assertEqual(link, 'https://my-bucket.cwobject.com/foo/bar') + + def test_override_url_path_style(self): + """addressing_style='path' yields a path-style URL — needed for + localstack/MinIO setups without wildcard DNS.""" + link = self._backend().parse_uri_to_link( + 'us-east-1', + override_url='http://localstack-s3.osmo:4566', + addressing_style='path', + ) + self.assertEqual(link, 'http://localstack-s3.osmo:4566/my-bucket/foo/bar') + + def test_override_url_preserves_scheme(self): + """Scheme of the override is preserved in the link.""" + link = self._backend().parse_uri_to_link( + 'us-east-1', + override_url='http://minio.local:9000', + ) + self.assertEqual(link, 'http://my-bucket.minio.local:9000/foo/bar') + + def test_override_url_preserves_base_path_virtual_host(self): + """Reverse-proxied endpoint (gateway.example.com/s3) keeps the /s3 prefix.""" + link = self._backend().parse_uri_to_link( + 'us-east-1', + override_url='https://gateway.example.com/s3', + ) + self.assertEqual(link, 'https://my-bucket.gateway.example.com/s3/foo/bar') + + def test_override_url_preserves_base_path_path_style(self): + """Reverse-proxied endpoint with addressing_style=path preserves base path.""" + link = self._backend().parse_uri_to_link( + 'us-east-1', + override_url='https://gateway.example.com/s3', + addressing_style='path', + ) + self.assertEqual(link, 'https://gateway.example.com/s3/my-bucket/foo/bar') + + class S3BackendRegionTest(unittest.TestCase): """Tests for S3Backend.region() endpoint routing.""" diff --git a/src/service/core/data/data_service.py b/src/service/core/data/data_service.py index b89d64e16..260439038 100755 --- a/src/service/core/data/data_service.py +++ b/src/service/core/data/data_service.py @@ -19,6 +19,7 @@ import base64 import datetime import json +import mimetypes import shlex from typing import Any, Dict, List, Sequence import uuid @@ -121,14 +122,20 @@ def get_collection_info(postgres: connectors.PostgresConnector, dataset_rows = get_collection_datasets(postgres, bucket, name) bucket_config = postgres.get_dataset_configs().get_bucket_config(bucket) + default_cred = bucket_config.default_credential + override_url = default_cred.override_url if default_cred else None + addressing_style = default_cred.addressing_style if default_cred else None rows: 
     for row in dataset_rows:
         rows.append(objects.DataInfoCollectionEntry(
             name=row.name,
             version=row.version_id,
-            location=storage.construct_storage_backend(row.location)\
-                .parse_uri_to_link(bucket_config.region),
+            location=storage.construct_storage_backend(row.location).parse_uri_to_link(
+                bucket_config.region,
+                override_url=override_url,
+                addressing_style=addressing_style,
+            ),
             uri=row.location,
             hash_location=row.hash_location,
             size=row.size))
@@ -177,6 +184,9 @@ def get_dataset_info(postgres: connectors.PostgresConnector,
                          f'any entry fitting the parameters in bucket {bucket}.')
 
     bucket_config = postgres.get_dataset_configs().get_bucket_config(bucket)
+    default_cred = bucket_config.default_credential
+    override_url = default_cred.override_url if default_cred else None
+    addressing_style = default_cred.addressing_style if default_cred else None
 
     rows: List[objects.DataInfoDatasetEntry] = []
     for row in dataset_rows:
@@ -199,8 +209,11 @@ def get_dataset_info(postgres: connectors.PostgresConnector,
             last_used=row.last_used.replace(microsecond=0),
             size=row.size if row.size else 0,
             checksum=row.checksum if row.checksum else '',
-            location=storage.construct_storage_backend(row.location)\
-                .parse_uri_to_link(bucket_config.region),
+            location=storage.construct_storage_backend(row.location).parse_uri_to_link(
+                bucket_config.region,
+                override_url=override_url,
+                addressing_style=addressing_style,
+            ),
             uri=row.location,
             metadata=row.metadata,
             tags=[element.tag for element in tags],
@@ -993,6 +1006,80 @@ def get_info(
         versions=rows)
 
 
+@router.get('/{bucket}/dataset/{name}/manifest')
+def get_manifest(
+    bucket: objects.DatasetPattern,
+    name: objects.DatasetPattern,
+    version: str = fastapi.Query(...),
+) -> List:
+    """ Returns the manifest for a dataset version. """
+    postgres = connectors.PostgresConnector.get_instance()
+    dataset_info = get_dataset(postgres, bucket=bucket, name=name)
+
+    fetch_command = '''
+        SELECT location FROM dataset_version
+        WHERE dataset_id = %s AND version_id = %s AND status = %s;
+    '''
+    rows = postgres.execute_fetch_command(
+        fetch_command, (dataset_info.id, version, objects.DatasetStatus.READY.name))
+    if not rows:
+        raise osmo_errors.OSMODatabaseError(
+            f'Version {version} not found for dataset {name} in bucket {bucket}.')
+
+    bucket_config = postgres.get_dataset_configs().get_bucket_config(bucket)
+    client = storage.SingleObjectClient.create(
+        storage_uri=rows[0].location,
+        data_credential=bucket_config.default_credential,
+    )
+    manifest_content = client.get_object_stream(as_io=True).read()
+    return json.loads(manifest_content)
+
+
+@router.api_route('/{bucket}/dataset/{name}/file-content', methods=['GET', 'HEAD'])
+def get_file_content(
+    bucket: objects.DatasetPattern,
+    name: objects.DatasetPattern,
+    storage_path: str = fastapi.Query(...),
+    filename: str | None = fastapi.Query(default=None),
+) -> fastapi.responses.StreamingResponse:
+    """
+    Streams file content from storage for the dataset file preview.
+
+    storage_path is hash-keyed in the dataset layout (e.g. .../hashes/),
+    so it carries no extension that mimetypes.guess_type can use. The optional
+    filename param carries the original name (e.g. 'lipsum.txt') so we can
+    return a useful Content-Type. filename is purely for media-type guessing;
+    access control still hinges on storage_path's container check.
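+
+    Illustrative request (hypothetical values):
+        GET /{bucket}/dataset/{name}/file-content
+            ?storage_path=s3://data-bucket/datasets/mydata/hashes/abc123
+            &filename=lipsum.txt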
+ """ + postgres = connectors.PostgresConnector.get_instance() + dataset_info = get_dataset(postgres, bucket=bucket, name=name) + + # Validate that the storage path belongs to this dataset's hash storage + # prefix. Container-only matching would let a caller request any object + # in the same bucket (e.g. another dataset's manifest) via this endpoint. + requested_backend = storage.construct_storage_backend(storage_path) + dataset_backend = storage.construct_storage_backend(dataset_info.hash_location) + hash_prefix = dataset_backend.path.rstrip('/') + '/' + if (requested_backend.container != dataset_backend.container + or not requested_backend.path.startswith(hash_prefix)): + raise osmo_errors.OSMOUserError( + 'Storage path does not belong to this dataset.') + + bucket_config = postgres.get_dataset_configs().get_bucket_config(bucket) + client = storage.SingleObjectClient.create( + storage_uri=storage_path, + data_credential=bucket_config.default_credential, + ) + + content_type = ( + mimetypes.guess_type(filename)[0] if filename else None + ) or mimetypes.guess_type(storage_path)[0] or 'application/octet-stream' + return fastapi.responses.StreamingResponse( + client.get_object_stream(), + media_type=content_type, + ) + + @router.get('/list_dataset', response_model=objects.DataListResponse, deprecated=True) def list_dataset_from_bucket(name: objects.DatasetPattern | None = None, user: List[str] | None = fastapi.Query(default = None), @@ -1154,6 +1241,9 @@ def query_dataset( postgres = connectors.PostgresConnector.get_instance() bucket_config = postgres.get_dataset_configs().get_bucket_config(str(bucket)) + default_cred = bucket_config.default_credential + override_url = default_cred.override_url if default_cred else None + addressing_style = default_cred.addressing_style if default_cred else None query_term = query.QueryParser.get_instance().parse(' '.join(command_parsed)) @@ -1184,8 +1274,11 @@ def query_dataset( last_used=row.last_used.replace(microsecond=0), size=row.size if row.size else 0, checksum=row.checksum if row.checksum else '', - location=storage.construct_storage_backend(row.location)\ - .parse_uri_to_link(bucket_config.region), + location=storage.construct_storage_backend(row.location).parse_uri_to_link( + bucket_config.region, + override_url=override_url, + addressing_style=addressing_style, + ), uri=row.location, metadata=row.metadata, tags=[], diff --git a/src/ui/next.config.ts b/src/ui/next.config.ts index d65685c4e..32732d8d5 100644 --- a/src/ui/next.config.ts +++ b/src/ui/next.config.ts @@ -198,9 +198,6 @@ const nextConfig: NextConfig = { // This allows Turbopack aliasing to work (aliases work for imports, not file discovery) "@/app/api/[...path]/route.impl": "@/app/api/[...path]/route.impl.production", - // Dataset manifest server action - alias to production version (zero mock code) - "@/lib/api/server/dataset-actions": "@/lib/api/server/dataset-actions.production", - // Dataset file proxy route - alias to production version (zero mock code) "@/app/proxy/dataset/file/route.impl": "@/app/proxy/dataset/file/route.impl.production", diff --git a/src/ui/src/app/proxy/dataset/file/route.impl.production.ts b/src/ui/src/app/proxy/dataset/file/route.impl.production.ts index 17c7ef928..408569322 100644 --- a/src/ui/src/app/proxy/dataset/file/route.impl.production.ts +++ b/src/ui/src/app/proxy/dataset/file/route.impl.production.ts @@ -17,56 +17,104 @@ /** * Dataset File Proxy — Production Implementation * - * Server-side proxy for fetching dataset files from storage URLs. 
- * Routes requests through the server to avoid CSP restrictions.
+ * Server-side proxy for fetching dataset files. All requests route through
+ * the backend service's authenticated /file-content endpoint, which signs
+ * the upstream call against the bucket credential. The proxy itself does
+ * not accept arbitrary URLs — that would be an SSRF vector
+ * (cloud-metadata endpoints, RFC1918 ranges, loopback). Manifests since
+ * #795 always carry storage_path, so callers no longer need a url= form.
  *
- * GET /proxy/dataset/file?url={encodedFileUrl} → streams file content
- * HEAD /proxy/dataset/file?url={encodedFileUrl} → returns headers only
+ * GET /proxy/dataset/file?bucket=...&name=...&storagePath=... → service proxy
+ * HEAD variant → headers only
  */
 
-const FORWARDED_HEADERS = ["content-type", "content-length", "last-modified", "etag", "cache-control"] as const;
+import type { NextRequest } from "next/server";
+import { getServerApiBaseUrl } from "@/lib/api/server/config";
+import { forwardAuthHeaders } from "@/lib/api/server/proxy-headers";
 
-function parseAndValidateUrl(request: Request): { url: string } | Response {
-  const { searchParams } = new URL(request.url);
-  const url = searchParams.get("url");
+const FORWARDED_REQUEST_HEADERS = ["range"] as const;
+// Upstream cache directives are intentionally NOT forwarded — this is a
+// per-user authenticated route and any 'public' / 'max-age' from the storage
+// provider could let an intermediate cache serve another user's bytes. We
+// override with 'private, no-store' below. 'vary' is similarly not forwarded
+// for the same reason.
+const FORWARDED_RESPONSE_HEADERS = [
+  "content-type",
+  "content-length",
+  "last-modified",
+  "etag",
+  "accept-ranges",
+  "content-range",
+] as const;
 
-  if (!url) {
-    return Response.json({ error: "url parameter is required" }, { status: 400 });
+function forwardRequestHeaders(request: NextRequest, base: HeadersInit = {}): HeadersInit {
+  const headers = new Headers(base);
+  for (const header of FORWARDED_REQUEST_HEADERS) {
+    const value = request.headers.get(header);
+    if (value) headers.set(header, value);
   }
+  return headers;
+}
 
-  if (!url.startsWith("http://") && !url.startsWith("https://")) {
-    return Response.json({ error: "Only http/https URLs are supported" }, { status: 400 });
+function forwardResponseHeaders(upstream: Response): Headers {
+  const headers = new Headers();
+  for (const header of FORWARDED_RESPONSE_HEADERS) {
+    const value = upstream.headers.get(header);
+    if (value) headers.set(header, value);
   }
+  // Authenticated, per-user — never let an intermediate cache hold this.
+  headers.set("cache-control", "private, no-store");
+  return headers;
+}
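+
+// Example flow (illustrative values): a browser request to
+//   GET /proxy/dataset/file?bucket=ml&name=pics&storagePath=s3://b/hashes/ab12&filename=cat.jpg
+// is forwarded to
+//   GET {backend}/api/bucket/ml/dataset/pics/file-content?storage_path=s3://b/hashes/ab12&filename=cat.jpg
+// with auth headers attached and any Range header passed through.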
-  return { url };
+interface ServiceParams {
+  bucket: string;
+  name: string;
+  storagePath: string;
+  /** Original filename used by the service for Content-Type guessing only.
+   * storage_path is hash-keyed (no extension), so without this every file
+   * comes back as application/octet-stream. */
+  filename?: string;
 }
 
-export async function GET(request: Request) {
-  const result = parseAndValidateUrl(request);
-  if (result instanceof Response) return result;
+function parseParams(request: Request): ServiceParams | Response {
+  const { searchParams } = new URL(request.url);
 
-  const upstream = await fetch(result.url);
+  const bucket = searchParams.get("bucket");
+  const name = searchParams.get("name");
+  const storagePath = searchParams.get("storagePath");
+  const filename = searchParams.get("filename") ?? undefined;
 
-  const headers = new Headers();
-  for (const header of FORWARDED_HEADERS) {
-    const value = upstream.headers.get(header);
-    if (value) headers.set(header, value);
+  if (!bucket || !name || !storagePath) {
+    return Response.json({ error: "bucket, name, and storagePath query params are required" }, { status: 400 });
   }
 
-  return new Response(upstream.body, { status: upstream.status, headers });
+  return { bucket, name, storagePath, filename };
+}
+
+async function fetchUpstream(request: NextRequest, params: ServiceParams, method: string): Promise<Response> {
+  const backendUrl = getServerApiBaseUrl();
+  const query = new URLSearchParams({ storage_path: params.storagePath });
+  if (params.filename) query.set("filename", params.filename);
+  const serviceUrl = `${backendUrl}/api/bucket/${encodeURIComponent(params.bucket)}/dataset/${encodeURIComponent(params.name)}/file-content?${query}`;
+  const headers = forwardRequestHeaders(request, forwardAuthHeaders(request));
+  return fetch(serviceUrl, { method, headers });
 }
 
-export async function HEAD(request: Request) {
-  const result = parseAndValidateUrl(request);
+export async function GET(request: NextRequest) {
+  const result = parseParams(request);
   if (result instanceof Response) return result;
 
-  const upstream = await fetch(result.url, { method: "HEAD" });
+  const upstream = await fetchUpstream(request, result, "GET");
+  const headers = forwardResponseHeaders(upstream);
+  return new Response(upstream.body, { status: upstream.status, headers });
+}
 
-  const headers = new Headers();
-  for (const header of FORWARDED_HEADERS) {
-    const value = upstream.headers.get(header);
-    if (value) headers.set(header, value);
-  }
+export async function HEAD(request: NextRequest) {
+  const result = parseParams(request);
+  if (result instanceof Response) return result;
 
+  const upstream = await fetchUpstream(request, result, "HEAD");
+  const headers = forwardResponseHeaders(upstream);
   return new Response(null, { status: upstream.status, headers });
 }
diff --git a/src/ui/src/features/datasets/detail/components/dataset-detail-content.tsx b/src/ui/src/features/datasets/detail/components/dataset-detail-content.tsx
index 2e3c5e741..c05137402 100644
--- a/src/ui/src/features/datasets/detail/components/dataset-detail-content.tsx
+++ b/src/ui/src/features/datasets/detail/components/dataset-detail-content.tsx
@@ -176,6 +176,8 @@ export function DatasetDetailContent({ bucket, name }: Props) {
   const {
     versions,
     location,
+    resolvedName,
+    resolvedVersion,
     files: virtualFiles,
     memberSubPath,
     segmentLabels,
@@ -184,6 +186,8 @@
       return {
         versions: [],
         location: null as string | null,
+        resolvedName: name,
+        resolvedVersion: "",
         files: null as DatasetFile[] | null,
         memberSubPath: "",
         segmentLabels: {} as Record<string, string>,
@@ -197,6 +201,8 @@
       return {
         versions: detail.versions,
         location: currentVersionData?.location ?? null,
+        resolvedName: name,
+        resolvedVersion: currentVersionData?.version ?? "",
         files: null,
         memberSubPath: path,
         segmentLabels: {},
@@ -221,6 +227,8 @@
       return {
         versions: [],
         location: null,
+        resolvedName: name,
+        resolvedVersion: "",
        files: memberEntries,
         memberSubPath: "",
         segmentLabels: labels,
@@ -234,11 +242,13 @@
       return {
         versions: [],
         location: member?.location ?? null,
+        resolvedName: member?.name ?? name,
+        resolvedVersion: member?.version ?? "",
         files: null,
         memberSubPath: subPath,
         segmentLabels: labels,
       };
-  }, [detail, version, path]);
+  }, [detail, version, path, name]);
 
   // ==========================================================================
   // File listing — fetch manifest for selected version/member
@@ -249,7 +259,7 @@
     isLoading: isFilesLoading,
     error: filesError,
     refetch: refetchFiles,
-  } = useDatasetFiles(location);
+  } = useDatasetFiles(bucket, resolvedName, resolvedVersion, location);
 
   // Normal (unfiltered) directory listing — used for FilterBar suggestions and as base view
   const normalFiles = useMemo(
@@ -573,6 +583,8 @@ export function DatasetDetailContent({ bucket, name }: Props) {
           )}
 
diff --git a/src/ui/src/features/datasets/detail/components/file-preview-panel.tsx b/src/ui/src/features/datasets/detail/components/file-preview-panel.tsx
index 9274ecdef..734a0a4cf 100644
--- a/src/ui/src/features/datasets/detail/components/file-preview-panel.tsx
+++ b/src/ui/src/features/datasets/detail/components/file-preview-panel.tsx
@@ -24,7 +24,7 @@
  *
  * - image/* → via proxy
  * - video/* →