From cb5d1ef6489d59c5bc0249dd1658a430f21e1b33 Mon Sep 17 00:00:00 2001 From: Elodie MORIN Date: Fri, 13 Mar 2026 07:50:12 +0100 Subject: [PATCH 1/6] fix(backend): audio path on OSEkit SpectroData replacement --- backend/utils/osekit_replace.py | 36 +++++++++++++++++++++++++++------ 1 file changed, 30 insertions(+), 6 deletions(-) diff --git a/backend/utils/osekit_replace.py b/backend/utils/osekit_replace.py index 57828e3b2..d53e1ea19 100644 --- a/backend/utils/osekit_replace.py +++ b/backend/utils/osekit_replace.py @@ -37,6 +37,7 @@ class SpectroData: end: Timestamp duration: Timedelta audio_data: AudioData + files: list[TFile] def __init__( self, @@ -45,6 +46,7 @@ def __init__( end: Timestamp, v_lim: tuple[float, float], audio_data: AudioData, + files: list[TFile], ): self.name = name self.begin = begin @@ -52,6 +54,7 @@ def __init__( self.v_lim = v_lim self.duration = self.end - self.begin self.audio_data = audio_data + self.files = files class SpectroDataset: @@ -122,12 +125,10 @@ def from_json(json_path: Path) -> "SpectroDataset": files=[ TFile( path=join( - folder, - PureWindowsPath(file["path"]) - .as_posix() - .split(PureWindowsPath(folder).stem) - .pop() - .strip("/"), + folder.parent.parent, + make_path_relative( + file["path"], to=folder.parent.parent + ), ), begin=Timestamp( strptime_from_text( @@ -147,6 +148,29 @@ def from_json(json_path: Path) -> "SpectroDataset": ].items() ], ), + files=[ + TFile( + path=join( + folder.parent.parent, + make_path_relative( + file["path"], to=folder.parent.parent + ), + ), + begin=Timestamp( + strptime_from_text( + file["begin"], + datetime_template=TIMESTAMP_FORMATS_EXPORTED_FILES, + ) + ), + end=Timestamp( + strptime_from_text( + file["end"], + datetime_template=TIMESTAMP_FORMATS_EXPORTED_FILES, + ) + ), + ) + for name, file in spectro_data["files"].items() + ], ) for name, spectro_data in dataset_data["data"].items() ], From 47bfafef6e06fc7fd59ec689df3b55912f5c59c1 Mon Sep 17 00:00:00 2001 From: Elodie MORIN Date: 
Fri, 13 Mar 2026 07:59:15 +0100 Subject: [PATCH 2/6] fix(metadatax): update metadatax to fix migration on Postgres 9 --- poetry.lock | 8 ++++---- pyproject.toml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/poetry.lock b/poetry.lock index e82114130..b0ebce3ff 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2227,13 +2227,13 @@ files = [ [[package]] name = "metadatax" -version = "0.5.2" +version = "0.5.3" description = "PAM acquisition metadata" optional = false python-versions = ">=3.9,<4.0" groups = ["main"] files = [ - {file = "v0.5.2.tar.gz", hash = "sha256:08ecc1053fb33051756327022eca4c313674a1274cde0966c9dc584ba3225673"}, + {file = "v0.5.3.tar.gz", hash = "sha256:52c25cee14ee7347c70f606a61ac416e54880b9fca605d0acef62ce84f43d8a6"}, ] [package.dependencies] @@ -2253,7 +2253,7 @@ python-dotenv = ">=1.0.1,<2.0.0" [package.source] type = "url" -url = "https://github.com/PAM-Standardization/metadatax/archive/refs/tags/v0.5.2.tar.gz" +url = "https://github.com/PAM-Standardization/metadatax/archive/refs/tags/v0.5.3.tar.gz" [[package]] name = "mistune" @@ -4076,4 +4076,4 @@ brotli = ["brotli"] [metadata] lock-version = "2.1" python-versions = "^3.12" -content-hash = "f95e45125abaed37c71a81e5bd82b88a60524e0e3e62c1c8742d9b39abfdd91a" +content-hash = "aff12d8c28e4ee43af0ee5d8972fcab00b38b2ea84fc3bc8a5fe755d05072eab" diff --git a/pyproject.toml b/pyproject.toml index b9dca7853..6ff5bb25c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -29,7 +29,7 @@ djangorestframework-stubs = "^3.16.7" #django-extension = {path = "../django-extension", develop = true} django-extension = { url = "https://github.com/Project-OSmOSE/django-extension/releases/download/v0.1.4/django_extension-0.1.4.tar.gz" } #metadatax = {path = "../metadatax", develop=true} -metadatax = {url = "https://github.com/PAM-Standardization/metadatax/archive/refs/tags/v0.5.2.tar.gz"} +metadatax = {url = "https://github.com/PAM-Standardization/metadatax/archive/refs/tags/v0.5.3.tar.gz"} 
[tool.poetry.group.dev.dependencies] pytest = "^8.3.5" From f90c3cec2fdbe045cbdb819cd38c99a8ef4c2d0f Mon Sep 17 00:00:00 2001 From: Elodie MORIN Date: Mon, 16 Mar 2026 13:53:34 +0100 Subject: [PATCH 3/6] fix(backend.storage): speed up import process [WIP] --- .../api/models/data/spectrogram_analysis.py | 58 ++-- backend/storage/resolvers/_abstract.py | 53 ++-- backend/storage/resolvers/_legacy_osekit.py | 281 +++++++++++------- backend/storage/resolvers/_model.py | 19 +- backend/storage/resolvers/_osekit.py | 104 ++++--- .../schema/mutations/import_dataset.py | 24 +- backend/storage/utils.py | 2 +- 7 files changed, 336 insertions(+), 205 deletions(-) diff --git a/backend/api/models/data/spectrogram_analysis.py b/backend/api/models/data/spectrogram_analysis.py index c444c4b2d..2521cd1f1 100644 --- a/backend/api/models/data/spectrogram_analysis.py +++ b/backend/api/models/data/spectrogram_analysis.py @@ -1,11 +1,13 @@ """Spectrogram analysis model""" +from datetime import datetime from os.path import join from pathlib import Path from dateutil import parser from django.conf import settings -from django.db import models +from django.db import models, transaction from django.db.models import CheckConstraint, Q, Min, Max +from metadatax.data.models import FileFormat from typing_extensions import deprecated from .__abstract_analysis import AbstractAnalysis @@ -191,30 +193,48 @@ def get_osekit_spectro_dataset_serialized_path(self) -> Path: ) ) + @transaction.atomic def add_spectrograms(self, spectrograms: list["Spectrogram"]): """Add spectrogram objects to current analysis""" - self.spectrograms.bulk_create( - spectrograms, ignore_conflicts=True, batch_size=100 + existing_spectrograms = [] + new_spectrograms = [] + img_format, _ = FileFormat.objects.get_or_create(name="png") + dataset_spectrograms = self.spectrograms.model.objects.filter( + analysis__dataset=self.dataset ) - self.spectrograms.through.objects.bulk_create( - [ - self.spectrograms.through( - 
spectrogram=self.spectrograms.model.objects.get( - filename=spectrogram.filename, - format=spectrogram.format, - start=spectrogram.start, - end=spectrogram.end, - ), - spectrogramanalysis=self, + + for s in spectrograms: + params = { + "filename": s.filename, + "format": s.format, + "start": s.start, + "end": s.end, + } + if dataset_spectrograms.filter(**params).exists(): + existing_spectrograms.append( + dataset_spectrograms.filter(**params).first() ) - for spectrogram in spectrograms - ] + else: + new_spectrograms.append(s) + + new_spectrograms: list[ + "Spectrogram" + ] = self.spectrograms.model.objects.bulk_create( + new_spectrograms, ignore_conflicts=True ) + spectrogram_analysis_rel = [] + spectrograms = existing_spectrograms + new_spectrograms + for spectrogram in spectrograms: + spectrogram.save() + spectrogram_analysis_rel.append( + self.spectrograms.through( + spectrogram=spectrogram, spectrogramanalysis=self + ) + ) - info = self.spectrograms.aggregate(start=Min("start"), end=Max("end")) - self.start = info["start"] - self.end = info["end"] - self.save() + self.spectrograms.through.objects.bulk_create(spectrogram_analysis_rel) + + self.update_dates() def update_dates(self): """Update start and end dates based on spectrogram data""" diff --git a/backend/storage/resolvers/_abstract.py b/backend/storage/resolvers/_abstract.py index d15dc22b5..1d6db57db 100644 --- a/backend/storage/resolvers/_abstract.py +++ b/backend/storage/resolvers/_abstract.py @@ -1,7 +1,10 @@ from pathlib import PureWindowsPath from backend.api.models import Dataset, SpectrogramAnalysis, Spectrogram -from backend.storage.exceptions import CannotGetChildrenException +from backend.storage.exceptions import ( + CannotGetChildrenException, + AnalysisNotFoundException, +) from backend.storage.types import ( StorageItem, StorageDataset, @@ -70,30 +73,28 @@ def get_dataset(self, path: str | None = None) -> Dataset | FailedItem | None: return 
self.get_dataset(path=PureWindowsPath(path).parent.as_posix()) def _get_all_analysis_for_dataset( - self, dataset: Dataset + self, dataset: Dataset, detailed: bool = False ) -> list[SpectrogramAnalysis | FailedItem]: return [] - def _get_all_detailed_analysis_for_dataset( - self, dataset: Dataset - ) -> list[SpectrogramAnalysis | FailedItem]: - return self._get_all_analysis_for_dataset(dataset=dataset) - def get_all_analysis( self, path: str | None = None, detailed: bool = False ) -> list[SpectrogramAnalysis | FailedItem]: """Returns analysis list from storage""" - if path is None and not detailed: + if path is None: return self.all_analysis dataset = self.get_dataset(path=path) if not dataset or isinstance(dataset, FailedItem): return [] - if detailed: - return self._get_all_detailed_analysis_for_dataset(dataset=dataset) - return self._get_all_analysis_for_dataset(dataset=dataset) + return self._get_all_analysis_for_dataset(dataset=dataset, detailed=detailed) + + def _get_analysis( + self, dataset: Dataset, relative_path: str, detailed: bool = False + ) -> SpectrogramAnalysis | FailedItem | None: + return None def get_analysis( - self, path: str | None = None + self, path: str | None = None, detailed: bool = False ) -> SpectrogramAnalysis | FailedItem | None: """Returns analysis from storage""" if path is None: @@ -103,10 +104,9 @@ def get_analysis( return None path = path or self.path relative_path = make_path_relative(path, to=dataset.path) - for analysis in self.get_all_analysis(path=path): - if analysis.path == relative_path: - return analysis - return None + return self._get_analysis( + dataset=dataset, relative_path=relative_path, detailed=detailed + ) def get_all_spectrograms_for_analysis( self, analysis: SpectrogramAnalysis @@ -130,19 +130,22 @@ def _get_storage_dataset_from_dataset(self, dataset: Dataset) -> StorageDataset: import_status=ImportStatus.AVAILABLE, ) - def get_item(self, path: str | None = None) -> StorageItem | None: + def get_item( + self, 
path: str | None = None, discover_analysis: bool = True + ) -> StorageItem | None: """Get item from storage""" dataset = self.get_dataset(path) if not dataset: return StorageFolder(path=path) - analysis = self.get_analysis(path=path) - if analysis: - if isinstance(analysis, FailedItem): - return analysis.to_storage_analysis() - return self._get_storage_analysis_from_spectrogram_analysis( - analysis=analysis - ) + if discover_analysis: + analysis = self.get_analysis(path=path) + if analysis: + if isinstance(analysis, FailedItem): + return analysis.to_storage_analysis() + return self._get_storage_analysis_from_spectrogram_analysis( + analysis=analysis + ) if isinstance(dataset, FailedItem): return dataset.to_storage_dataset() @@ -164,7 +167,7 @@ def get_children_items(self) -> list[StorageItem]: ] return [ - self.get_item(path=folder) + self.get_item(path=folder, discover_analysis=False) for folder in listdir(self.path) if not isfile(folder) ] diff --git a/backend/storage/resolvers/_legacy_osekit.py b/backend/storage/resolvers/_legacy_osekit.py index 3f92de833..8e163fe2e 100644 --- a/backend/storage/resolvers/_legacy_osekit.py +++ b/backend/storage/resolvers/_legacy_osekit.py @@ -1,4 +1,5 @@ import csv +import traceback from ast import literal_eval from functools import reduce from pathlib import PureWindowsPath @@ -28,8 +29,10 @@ listdir, make_path_relative, make_static_url, + clean_path, ) from ._storage import StorageResolver +from ..exceptions import AnalysisNotFoundException class LegacyCSVDataset(TypedDict): @@ -75,7 +78,8 @@ class LegacyOSEkitResolver(StorageResolver): __csv_datasets: list[LegacyCSVDataset] | None = [] - def __load_csv_datasets(self): + # Read CSV + def _load_csv_datasets(self): # Load datasets.csv self.__csv_datasets = [] if exists(settings.DATASET_FILE): @@ -99,9 +103,9 @@ def __load_csv_datasets(self): if len(duplicates) == 0: self.__csv_datasets.append(dataset) - def __get_related_csv_datasets(self, path: str) -> list[LegacyCSVDataset]: + 
def _get_csv_datasets(self, path: str) -> list[LegacyCSVDataset]: if not self.__csv_datasets: - self.__load_csv_datasets() + self._load_csv_datasets() return [ line for line in self.__csv_datasets @@ -112,10 +116,58 @@ def __get_related_csv_datasets(self, path: str) -> list[LegacyCSVDataset]: ) ] + def _get_csv_dataset( + self, path: str, dataset_sr: int, spectro_duration: int + ) -> LegacyCSVDataset | None: + for line in self._get_csv_datasets(path): + if ( + int(line["dataset_sr"]) == dataset_sr + and int(line["spectro_duration"]) == spectro_duration + ): + return line + return None + + @staticmethod + def _get_timestamps(csv_dataset: LegacyCSVDataset) -> list[LegacyCSVTimestamp]: + config = f"{csv_dataset['spectro_duration']}_{csv_dataset['dataset_sr']}" + timestamp_csv = join(csv_dataset["path"], f"data/audio/{config}/timestamp.csv") + + with open_file(timestamp_csv) as csvfile: + return [ + LegacyCSVTimestamp( + timestamp=Timestamp(info["timestamp"]), + filename=info["filename"], + ) + for info in list(csv.DictReader(csvfile)) + ] + + @staticmethod + def _get_spectro_metadata( + dataset_path: str, analysis_relative_path: str + ) -> LegacyCSVSpectroMetadata: + spectro_metadata_csv = join( + dataset_path, analysis_relative_path, "metadata.csv" + ) + with open_file(spectro_metadata_csv) as csvfile: + # noinspection PyTypeChecker + return next(csv.DictReader(csvfile)) + + @staticmethod + def _get_audio_metadata( + dataset_path: str, data_duration: int, sampling_frequency: int + ) -> LegacyCSVAudioMetadata: + config = f"{data_duration}_{sampling_frequency}" + audio_metadata_csv = join(dataset_path, f"data/audio/{config}/metadata.csv") + with open_file(audio_metadata_csv) as csvfile: + # noinspection PyTypeChecker + return next(csv.DictReader(csvfile)) + + # Implementations + def _get_dataset_for_path( self, path: str | None = None ) -> Dataset | FailedItem | None: - csv_datasets = self.__get_related_csv_datasets(path) + csv_datasets = self._get_csv_datasets(path) 
if len(csv_datasets) == 0: return super()._get_dataset_for_path(path=path) if len(csv_datasets) == 1: @@ -132,67 +184,81 @@ def _get_dataset_for_path( ) def _get_all_analysis_for_dataset( - self, dataset: Dataset + self, dataset: Dataset, detailed: bool = False ) -> list[SpectrogramAnalysis | FailedItem]: # pylint: disable=broad-exception-caught - csv_datasets = self.__get_related_csv_datasets(dataset.path) + csv_datasets = self._get_csv_datasets(dataset.path) analysis: list[SpectrogramAnalysis | FailedItem] = [] for line in csv_datasets: config = f"{line['spectro_duration']}_{line['dataset_sr']}" relative_path = f"processed/spectrogram/{config}" base_folder = join(line["path"], relative_path) - timestamp_csv = join(line["path"], f"data/audio/{config}/timestamp.csv") - - try: - with open_file(timestamp_csv) as csvfile: - timestamps: list[LegacyCSVTimestamp] = [ - LegacyCSVTimestamp( - timestamp=Timestamp(info["timestamp"]), - filename=info["filename"], - ) - for info in list(csv.DictReader(csvfile)) - ] - except Exception as e: - analysis.append(FailedItem(path=base_folder, error=e)) - continue for folder in listdir(base_folder): - pwp = PureWindowsPath(folder) - name = f"{pwp.parent.name}/{pwp.name}" - try: - spectro_metadata_csv = join(folder, "metadata.csv") - with open_file(spectro_metadata_csv) as csvfile: - # noinspection PyTypeChecker - metadata: LegacyCSVSpectroMetadata = next( - csv.DictReader(csvfile) - ) - except Exception as e: - analysis.append(FailedItem(path=folder, name=name, error=e)) - continue - + print("get inner analysis", dataset, folder) analysis.append( - SpectrogramAnalysis( - name=name, - path=make_path_relative(folder, to=dataset.path), - legacy=True, - start=min(i["timestamp"] for i in timestamps), - end=max(i["timestamp"] for i in timestamps), + self._get_analysis( dataset=dataset, - data_duration=int(line["spectro_duration"]), - fft=FFT( - nfft=int(metadata["nfft"]), - window_size=int(metadata["window_size"]), - 
overlap=float(metadata["overlap"]), - sampling_frequency=int(line["dataset_sr"]), - legacy=True, - ), - colormap=Colormap(name=metadata["colormap"]), - dynamic_min=float(metadata["dynamic_min"]), - dynamic_max=float(metadata["dynamic_max"]), + relative_path=make_path_relative(folder, to=dataset.path), ) ) return analysis + def _get_analysis( + self, dataset: Dataset, relative_path: str, detailed: bool = False + ) -> SpectrogramAnalysis | FailedItem | None: + full_path = PureWindowsPath(join(dataset.path, relative_path)) + name = f"{full_path.parent.name}/{full_path.name}" + csv_datasets = self._get_csv_datasets( + path=full_path.as_posix(), + ) + if len(csv_datasets) == 0: + return None + + try: + data_duration, sampling_frequency = [ + int(i) for i in full_path.parent.name.split("_") + ] + except Exception as e: + return FailedItem(path=relative_path, name=name, error=e) + csv_dataset = next( + line + for line in csv_datasets + if int(line["dataset_sr"]) == sampling_frequency + and int(line["spectro_duration"]) == data_duration + ) + if csv_dataset is None: + return None + + try: + timestamps = self._get_timestamps(csv_dataset) + metadata = self._get_spectro_metadata( + dataset_path=dataset.path, + analysis_relative_path=relative_path, + ) + except Exception as e: + return FailedItem(path=relative_path, name=name, error=e) + + return SpectrogramAnalysis( + name=name, + path=relative_path, + legacy=True, + start=min(i["timestamp"] for i in timestamps), + end=max(i["timestamp"] for i in timestamps), + dataset=dataset, + data_duration=data_duration, + fft=FFT( + nfft=int(metadata["nfft"]), + window_size=int(metadata["window_size"]), + overlap=float(metadata["overlap"]), + sampling_frequency=sampling_frequency, + legacy=True, + ), + colormap=Colormap(name=metadata["colormap"]), + dynamic_min=float(metadata["dynamic_min"]), + dynamic_max=float(metadata["dynamic_max"]), + ) + def get_all_spectrograms_for_analysis( self, analysis: SpectrogramAnalysis ) -> 
list[Spectrogram]: @@ -217,69 +283,62 @@ def create_legacy_configuration(self, analysis: SpectrogramAnalysis): """Create legacy configuration object for analysis""" if not analysis.legacy: return None - spectro_metadata_csv = join( - analysis.dataset.path, analysis.path, "metadata.csv" + metadata = self._get_spectro_metadata(analysis.dataset.path, analysis.path) + audio = self._get_audio_metadata( + dataset_path=analysis.dataset.path, + data_duration=int(analysis.data_duration), + sampling_frequency=analysis.fft.sampling_frequency, ) - with open_file(spectro_metadata_csv) as csvfile: - # noinspection PyTypeChecker - metadata: LegacyCSVSpectroMetadata = next(csv.DictReader(csvfile)) - config = f"{int(analysis.data_duration)}_{analysis.fft.sampling_frequency}" - audio_metadata_csv = join( - analysis.dataset.path, f"data/audio/{config}/metadata.csv" - ) - with open_file(audio_metadata_csv) as csvfile: - # noinspection PyTypeChecker - audio: LegacyCSVAudioMetadata = next(csv.DictReader(csvfile)) - - linear_scale: LinearScale | None = None - multilinear_scale: MultiLinearScale | None = None - if "custom_frequency_scale" in metadata: - scale_name = metadata["custom_frequency_scale"] - if scale_name.lower() == "porp_delph": - ( - multilinear_scale, - is_created, - ) = MultiLinearScale.objects.get_or_create(name="porp_delph") - if is_created: - multilinear_scale.inner_scales.add( - LinearScale.objects.get_or_create( - ratio=0.5, min_value=0, max_value=30_000 - )[0] - ) - multilinear_scale.inner_scales.add( - LinearScale.objects.get_or_create( - ratio=0.7, min_value=30_000, max_value=80_000 - )[0] - ) - multilinear_scale.inner_scales.add( - LinearScale.objects.get_or_create( - ratio=1, - min_value=80_000, - max_value=analysis.fft.sampling_frequency / 2, - )[0] - ) - elif scale_name.lower() == "dual_lf_hf": - ( - multilinear_scale, - is_created, - ) = MultiLinearScale.objects.get_or_create(name="dual_lf_hf") - if is_created: - multilinear_scale.inner_scales.add( - 
LinearScale.objects.get_or_create( - ratio=0.5, min_value=0, max_value=22_000 - )[0] - ) - multilinear_scale.inner_scales.add( - LinearScale.objects.get_or_create( - ratio=1, - min_value=100_000, - max_value=analysis.fft.sampling_frequency / 2, - )[0] - ) - elif scale_name.lower() == "audible": - linear_scale = LinearScale( - name="audible", min_value=0, max_value=22_000 + + linear_scale: LinearScale | None = None + multilinear_scale: MultiLinearScale | None = None + if "custom_frequency_scale" in metadata: + scale_name = metadata["custom_frequency_scale"] + if scale_name.lower() == "porp_delph": + ( + multilinear_scale, + is_created, + ) = MultiLinearScale.objects.get_or_create(name="porp_delph") + if is_created: + multilinear_scale.inner_scales.add( + LinearScale.objects.get_or_create( + ratio=0.5, min_value=0, max_value=30_000 + )[0] + ) + multilinear_scale.inner_scales.add( + LinearScale.objects.get_or_create( + ratio=0.7, min_value=30_000, max_value=80_000 + )[0] + ) + multilinear_scale.inner_scales.add( + LinearScale.objects.get_or_create( + ratio=1, + min_value=80_000, + max_value=analysis.fft.sampling_frequency / 2, + )[0] ) + elif scale_name.lower() == "dual_lf_hf": + ( + multilinear_scale, + is_created, + ) = MultiLinearScale.objects.get_or_create(name="dual_lf_hf") + if is_created: + multilinear_scale.inner_scales.add( + LinearScale.objects.get_or_create( + ratio=0.5, min_value=0, max_value=22_000 + )[0] + ) + multilinear_scale.inner_scales.add( + LinearScale.objects.get_or_create( + ratio=1, + min_value=100_000, + max_value=analysis.fft.sampling_frequency / 2, + )[0] + ) + elif scale_name.lower() == "audible": + linear_scale = LinearScale( + name="audible", min_value=0, max_value=22_000 + ) LegacySpectrogramConfiguration.objects.create( spectrogram_analysis=analysis, diff --git a/backend/storage/resolvers/_model.py b/backend/storage/resolvers/_model.py index 8fdb804c5..79cf4a12e 100644 --- a/backend/storage/resolvers/_model.py +++ 
b/backend/storage/resolvers/_model.py @@ -1,3 +1,5 @@ +from datetime import datetime + from backend.api.models import ( Dataset, SpectrogramAnalysis, @@ -22,20 +24,33 @@ def _get_dataset_for_path( ).first() or super()._get_dataset_for_path(path=path) def _get_all_analysis_for_dataset( - self, dataset: Dataset + self, dataset: Dataset, detailed: bool = False ) -> list[SpectrogramAnalysis | FailedItem]: analysis = [] for a in dataset.spectrogram_analysis.all(): if exists(join(dataset.path, a.path)): analysis.append(a) + print(datetime.now().isoformat(), "got model analysis") - for a in super()._get_all_analysis_for_dataset(dataset=dataset): + for a in super()._get_all_analysis_for_dataset( + dataset=dataset, detailed=detailed + ): if not dataset.spectrogram_analysis.filter(path=a.path).exists(): analysis.append(a) + print(datetime.now().isoformat(), "got osekit analysis") return analysis + def _get_analysis( + self, dataset: Dataset, relative_path: str, detailed: bool = False + ) -> SpectrogramAnalysis | FailedItem: + if dataset.spectrogram_analysis.filter(path=relative_path).exists(): + return dataset.spectrogram_analysis.get(path=relative_path) + return super()._get_analysis( + dataset=dataset, relative_path=relative_path, detailed=detailed + ) + def _get_storage_analysis_from_spectrogram_analysis( self, analysis: SpectrogramAnalysis ) -> StorageAnalysis: diff --git a/backend/storage/resolvers/_osekit.py b/backend/storage/resolvers/_osekit.py index e708f8342..adb403192 100644 --- a/backend/storage/resolvers/_osekit.py +++ b/backend/storage/resolvers/_osekit.py @@ -1,4 +1,5 @@ import json +import traceback from pathlib import PureWindowsPath, Path from metadatax.data.models import FileFormat @@ -44,7 +45,7 @@ def _get_dataset_for_path( return super()._get_dataset_for_path(path=path) def _get_all_analysis_for_dataset( - self, dataset: Dataset + self, dataset: Dataset, detailed: bool = False ) -> list[SpectrogramAnalysis]: json_path = join(dataset.path, 
"dataset.json") if not exists(json_path): @@ -57,52 +58,79 @@ def _get_all_analysis_for_dataset( if info["class"] != SpectroDataset.__name__: continue analysis.append( - SpectrogramAnalysis( - name=name, - path=make_path_relative( + self._get_analysis( + dataset=dataset, + relative_path=make_path_relative( PureWindowsPath(info["json"]).parent.as_posix(), to=dataset.path, ), + detailed=detailed, ) ) return analysis - def _get_all_detailed_analysis_for_dataset( - self, dataset: Dataset - ) -> list[SpectrogramAnalysis]: - json_path = join(dataset.path, "dataset.json") - if not exists(json_path): - return super()._get_all_detailed_analysis_for_dataset(dataset=dataset) - osekit_dataset = OSEkitDataset.from_json(Path(make_absolute_server(json_path))) - analysis: list[SpectrogramAnalysis] = [] - for d in osekit_dataset.datasets.values(): - if d["class"] != SpectroDataset.__name__: - continue - sd: SpectroDataset = d["dataset"] - relative_path = make_path_relative( - sd.folder, to=make_path_relative(osekit_dataset.folder) + def _get_analysis( + self, dataset: Dataset, relative_path: str, detailed: bool = False + ) -> SpectrogramAnalysis | FailedItem | None: + if dataset.legacy: + return super()._get_analysis( + dataset=dataset, relative_path=relative_path, detailed=detailed ) - analysis.append( - SpectrogramAnalysis( - name=sd.name, - path=relative_path, - start=sd.begin, - end=sd.end, - dataset=dataset, - data_duration=sd.data_duration.seconds, - fft=FFT( - nfft=sd.fft.mfft, - window_size=sd.fft.win.size, - overlap=1 - (sd.fft.hop / sd.fft.win.size), - sampling_frequency=sd.fft.fs, - scaling=sd.fft.scaling, - ), - colormap=Colormap(name=sd.colormap or "viridis"), - dynamic_min=sd.v_lim[0], - dynamic_max=sd.v_lim[1], - ) + spectro_dataset: SpectroDataset | None = None + json_path = join(dataset.path, "dataset.json") + if exists(json_path): + with open_file(json_path) as f: + d = json.loads(f.read()) + for name, info in d["datasets"].items(): + if info["class"] != 
SpectroDataset.__name__: + continue + path = make_path_relative( + PureWindowsPath(info["json"]).parent.as_posix(), + to=dataset.path, + ) + if path == relative_path: + if not detailed: + return SpectrogramAnalysis( + name=PureWindowsPath(relative_path).name, + path=relative_path, + ) + spectro_dataset = SpectroDataset.from_json( + Path( + make_absolute_server( + join( + dataset.path, + make_path_relative( + info["json"], to=dataset.path + ), + ) + ) + ) + ) + if spectro_dataset is None: + return None + + if not detailed: + return SpectrogramAnalysis( + name=PureWindowsPath(relative_path).name, path=relative_path ) - return analysis + return SpectrogramAnalysis( + name=PureWindowsPath(relative_path).name, + path=relative_path, + start=spectro_dataset.begin, + end=spectro_dataset.end, + dataset=dataset, + data_duration=spectro_dataset.data_duration.seconds, + fft=FFT( + nfft=spectro_dataset.fft.mfft, + window_size=spectro_dataset.fft.win.size, + overlap=1 - (spectro_dataset.fft.hop / spectro_dataset.fft.win.size), + sampling_frequency=spectro_dataset.fft.fs, + scaling=spectro_dataset.fft.scaling, + ), + colormap=Colormap(name=spectro_dataset.colormap or "viridis"), + dynamic_min=spectro_dataset.v_lim[0], + dynamic_max=spectro_dataset.v_lim[1], + ) def __get_spectro_dataset( self, analysis: SpectrogramAnalysis diff --git a/backend/storage/schema/mutations/import_dataset.py b/backend/storage/schema/mutations/import_dataset.py index b22fc65cf..4e6782c59 100644 --- a/backend/storage/schema/mutations/import_dataset.py +++ b/backend/storage/schema/mutations/import_dataset.py @@ -1,9 +1,12 @@ +from datetime import datetime import graphene +from IPython.testing.tools import full_path from django.db import transaction from django.forms import model_to_dict from django_extension.schema.permissions import GraphQLResolve, GraphQLPermissions from graphene import Mutation, String from graphql import GraphQLError +from msgpack import Timestamp from backend.api.models import ( FFT, 
@@ -30,7 +33,9 @@ class Arguments: @transaction.atomic def mutate(self, info, dataset_path: str, analysis_path: str | None = None): """Do the mutation: create required analysis""" - resolver = Resolver(join(dataset_path, analysis_path or "")) + + full_path = join(dataset_path, analysis_path or "") + resolver = Resolver(full_path) if not resolver.dataset: raise GraphQLError("Dataset not found") @@ -43,15 +48,16 @@ def mutate(self, info, dataset_path: str, analysis_path: str | None = None): resolver.dataset.save() analysis = [] - for a in resolver.get_all_analysis(detailed=True): - if analysis_path and a.path == analysis_path: + if analysis_path: + if isinstance(resolver.analysis, FailedItem): + raise GraphQLError( + str(resolver.analysis.error), original_error=resolver.analysis.error + ) + analysis.append(resolver.get_analysis(path=full_path, detailed=True)) + else: + for a in resolver.all_analysis: if isinstance(a, FailedItem): - raise GraphQLError(str(a.error), original_error=a.error) - analysis.append(a) - continue - if isinstance(a, FailedItem): - continue - if not analysis_path: + continue analysis.append(a) for sa in analysis: diff --git a/backend/storage/utils.py b/backend/storage/utils.py index 0b5e166d7..1f251e84b 100644 --- a/backend/storage/utils.py +++ b/backend/storage/utils.py @@ -27,7 +27,7 @@ def make_path_relative(path: _Path, to: _Path | None = None) -> str: return clean_path(path) -def join(a: str, /, *paths: str) -> str: +def join(a: _Path, /, *paths: _Path) -> str: """Join multiple paths""" return clean_path(os.path.join(a, *paths)) From 85328cfe3113ce5d609289bba6a1e33eae341851 Mon Sep 17 00:00:00 2001 From: Elodie MORIN Date: Mon, 16 Mar 2026 17:20:26 +0100 Subject: [PATCH 4/6] fix(backend.storage): importing spectrograms with invalid filename --- .../annotation_phase/all_annotation_phases.py | 2 +- backend/storage/resolvers/_abstract.py | 5 +- backend/storage/resolvers/_model.py | 2 - backend/storage/resolvers/_osekit.py | 11 +- 
.../schema/mutations/import_dataset.py | 5 +- backend/utils/osekit_replace.py | 124 +++++++++--------- 6 files changed, 73 insertions(+), 76 deletions(-) diff --git a/backend/api/tests/schema/annotation_phase/all_annotation_phases.py b/backend/api/tests/schema/annotation_phase/all_annotation_phases.py index 6accff17e..65997954c 100644 --- a/backend/api/tests/schema/annotation_phase/all_annotation_phases.py +++ b/backend/api/tests/schema/annotation_phase/all_annotation_phases.py @@ -102,7 +102,7 @@ def test_connected_admin_filter_owner(self): content = json.loads(response.content)["data"]["allAnnotationPhases"]["results"] self.assertEqual(len(content), AnnotationPhase.objects.count()) - self.assertEqual(content[1]["phase"], "Annotation") + self.assertEqual(content[0]["phase"], "Annotation") def test_connected_admin_filter_annotator(self): response = self.gql_query( diff --git a/backend/storage/resolvers/_abstract.py b/backend/storage/resolvers/_abstract.py index 1d6db57db..d2f59a97a 100644 --- a/backend/storage/resolvers/_abstract.py +++ b/backend/storage/resolvers/_abstract.py @@ -81,8 +81,9 @@ def get_all_analysis( self, path: str | None = None, detailed: bool = False ) -> list[SpectrogramAnalysis | FailedItem]: """Returns analysis list from storage""" - if path is None: + if path is None and not detailed: return self.all_analysis + path = self.path dataset = self.get_dataset(path=path) if not dataset or isinstance(dataset, FailedItem): return [] @@ -155,7 +156,7 @@ def get_children_items(self) -> list[StorageItem]: """Get children items from storage""" dataset = self.get_dataset() analysis = self.get_analysis() - if analysis: + if analysis and not isinstance(analysis, FailedItem): raise CannotGetChildrenException(self.path) if dataset: diff --git a/backend/storage/resolvers/_model.py b/backend/storage/resolvers/_model.py index 79cf4a12e..f5b0e0698 100644 --- a/backend/storage/resolvers/_model.py +++ b/backend/storage/resolvers/_model.py @@ -31,14 +31,12 @@ def 
_get_all_analysis_for_dataset( for a in dataset.spectrogram_analysis.all(): if exists(join(dataset.path, a.path)): analysis.append(a) - print(datetime.now().isoformat(), "got model analysis") for a in super()._get_all_analysis_for_dataset( dataset=dataset, detailed=detailed ): if not dataset.spectrogram_analysis.filter(path=a.path).exists(): analysis.append(a) - print(datetime.now().isoformat(), "got osekit analysis") return analysis diff --git a/backend/storage/resolvers/_osekit.py b/backend/storage/resolvers/_osekit.py index adb403192..b43bd212a 100644 --- a/backend/storage/resolvers/_osekit.py +++ b/backend/storage/resolvers/_osekit.py @@ -1,9 +1,7 @@ import json -import traceback from pathlib import PureWindowsPath, Path from metadatax.data.models import FileFormat -from osekit.config import TIMESTAMP_FORMAT_EXPORTED_FILES_LOCALIZED from backend.api.models import SpectrogramAnalysis, Dataset, Colormap, FFT, Spectrogram from backend.storage.exceptions import AnalysisNotFoundException @@ -163,7 +161,7 @@ def get_all_spectrograms_for_analysis( return [ Spectrogram( format=img_format, - filename=data.begin.strftime(TIMESTAMP_FORMAT_EXPORTED_FILES_LOCALIZED), + filename=data.name, start=data.begin, end=data.end, ) @@ -180,10 +178,7 @@ def get_spectrogram_paths( ) for spectro_data in sd.data: - filename = spectro_data.begin.strftime( - TIMESTAMP_FORMAT_EXPORTED_FILES_LOCALIZED - ) - if filename == spectrogram.filename: + if spectro_data.name == spectrogram.filename: file = spectro_data.audio_data.files.pop(0) return ( make_static_url(Path(file.path).resolve()) if file else None, @@ -191,7 +186,7 @@ def get_spectrogram_paths( join( clean_path(sd.folder), "spectrogram", - f"{spectro_data.begin.strftime(TIMESTAMP_FORMAT_EXPORTED_FILES_LOCALIZED)}.png", + f"{spectrogram.filename}.png", ) ), ) diff --git a/backend/storage/schema/mutations/import_dataset.py b/backend/storage/schema/mutations/import_dataset.py index 4e6782c59..11a946b25 100644 --- 
a/backend/storage/schema/mutations/import_dataset.py +++ b/backend/storage/schema/mutations/import_dataset.py @@ -1,12 +1,9 @@ -from datetime import datetime import graphene -from IPython.testing.tools import full_path from django.db import transaction from django.forms import model_to_dict from django_extension.schema.permissions import GraphQLResolve, GraphQLPermissions from graphene import Mutation, String from graphql import GraphQLError -from msgpack import Timestamp from backend.api.models import ( FFT, @@ -55,7 +52,7 @@ def mutate(self, info, dataset_path: str, analysis_path: str | None = None): ) analysis.append(resolver.get_analysis(path=full_path, detailed=True)) else: - for a in resolver.all_analysis: + for a in resolver.get_all_analysis(detailed=True): if isinstance(a, FailedItem): continue analysis.append(a) diff --git a/backend/utils/osekit_replace.py b/backend/utils/osekit_replace.py index d53e1ea19..124838aec 100644 --- a/backend/utils/osekit_replace.py +++ b/backend/utils/osekit_replace.py @@ -103,6 +103,70 @@ def from_json(json_path: Path) -> "SpectroDataset": : -len(f"/{json_path.stem}{json_path.suffix}") ] ) + all_spectro_data = [] + for spectro_name, spectro_data in dataset_data["data"].items(): + audio_files = [] + spectro_files = [] + for name, file in spectro_data["files"].items(): + spectro_files.append( + TFile( + path=join( + folder.parent.parent, + make_path_relative( + file["path"], to=folder.parent.parent + ), + ), + begin=Timestamp( + strptime_from_text( + file["begin"], + datetime_template=TIMESTAMP_FORMATS_EXPORTED_FILES, + ) + ), + end=Timestamp( + strptime_from_text( + file["end"], + datetime_template=TIMESTAMP_FORMATS_EXPORTED_FILES, + ) + ), + ) + ) + for name, file in spectro_data["audio_data"]["files"].items(): + audio_files.append( + TFile( + path=join( + folder.parent.parent, + make_path_relative( + Path(folder, file["path"]).resolve() + if ".." 
in file["path"] + else file["path"], + to=folder.parent.parent, + ), + ), + begin=Timestamp( + strptime_from_text( + file["begin"], + datetime_template=TIMESTAMP_FORMATS_EXPORTED_FILES, + ) + ), + end=Timestamp( + strptime_from_text( + file["end"], + datetime_template=TIMESTAMP_FORMATS_EXPORTED_FILES, + ) + ), + ) + ) + all_spectro_data.append( + SpectroData( + name=spectro_name, + begin=Timestamp(spectro_data["begin"]), + end=Timestamp(spectro_data["end"]), + v_lim=spectro_data["v_lim"], + audio_data=AudioData(files=audio_files), + files=spectro_files, + ) + ) + return SpectroDataset( folder=folder, name=dataset_data["name"], @@ -115,65 +179,7 @@ def from_json(json_path: Path) -> "SpectroDataset": if sft else None, colormap=list(dataset_data["data"].values())[0]["colormap"], - data=[ - SpectroData( - name=name, - begin=Timestamp(spectro_data["begin"]), - end=Timestamp(spectro_data["end"]), - v_lim=spectro_data["v_lim"], - audio_data=AudioData( - files=[ - TFile( - path=join( - folder.parent.parent, - make_path_relative( - file["path"], to=folder.parent.parent - ), - ), - begin=Timestamp( - strptime_from_text( - file["begin"], - datetime_template=TIMESTAMP_FORMATS_EXPORTED_FILES, - ) - ), - end=Timestamp( - strptime_from_text( - file["end"], - datetime_template=TIMESTAMP_FORMATS_EXPORTED_FILES, - ) - ), - ) - for name, file in spectro_data["audio_data"][ - "files" - ].items() - ], - ), - files=[ - TFile( - path=join( - folder.parent.parent, - make_path_relative( - file["path"], to=folder.parent.parent - ), - ), - begin=Timestamp( - strptime_from_text( - file["begin"], - datetime_template=TIMESTAMP_FORMATS_EXPORTED_FILES, - ) - ), - end=Timestamp( - strptime_from_text( - file["end"], - datetime_template=TIMESTAMP_FORMATS_EXPORTED_FILES, - ) - ), - ) - for name, file in spectro_data["files"].items() - ], - ) - for name, spectro_data in dataset_data["data"].items() - ], + data=all_spectro_data, ) @property From e0fd348759c97ddd01bf825ce2af4674de2ef827 Mon Sep 17 
00:00:00 2001 From: Elodie MORIN Date: Mon, 16 Mar 2026 17:34:47 +0100 Subject: [PATCH 5/6] lint --- backend/api/models/data/spectrogram_analysis.py | 3 --- backend/storage/resolvers/_abstract.py | 6 ++---- backend/storage/resolvers/_legacy_osekit.py | 5 +---- backend/storage/resolvers/_model.py | 2 -- backend/storage/resolvers/_osekit.py | 4 ++-- backend/utils/osekit_replace.py | 4 ++-- 6 files changed, 7 insertions(+), 17 deletions(-) diff --git a/backend/api/models/data/spectrogram_analysis.py b/backend/api/models/data/spectrogram_analysis.py index 2521cd1f1..5cefd9a3d 100644 --- a/backend/api/models/data/spectrogram_analysis.py +++ b/backend/api/models/data/spectrogram_analysis.py @@ -1,5 +1,4 @@ """Spectrogram analysis model""" -from datetime import datetime from os.path import join from pathlib import Path @@ -7,7 +6,6 @@ from django.conf import settings from django.db import models, transaction from django.db.models import CheckConstraint, Q, Min, Max -from metadatax.data.models import FileFormat from typing_extensions import deprecated from .__abstract_analysis import AbstractAnalysis @@ -198,7 +196,6 @@ def add_spectrograms(self, spectrograms: list["Spectrogram"]): """Add spectrogram objects to current analysis""" existing_spectrograms = [] new_spectrograms = [] - img_format, _ = FileFormat.objects.get_or_create(name="png") dataset_spectrograms = self.spectrograms.model.objects.filter( analysis__dataset=self.dataset ) diff --git a/backend/storage/resolvers/_abstract.py b/backend/storage/resolvers/_abstract.py index d2f59a97a..935be13b2 100644 --- a/backend/storage/resolvers/_abstract.py +++ b/backend/storage/resolvers/_abstract.py @@ -3,8 +3,8 @@ from backend.api.models import Dataset, SpectrogramAnalysis, Spectrogram from backend.storage.exceptions import ( CannotGetChildrenException, - AnalysisNotFoundException, ) +from backend.storage.models import ImportStatus from backend.storage.types import ( StorageItem, StorageDataset, @@ -12,7 +12,6 @@ 
FailedItem, StorageFolder, ) -from backend.storage.models import ImportStatus from backend.storage.utils import make_path_relative, is_local_root, listdir, isfile @@ -46,7 +45,6 @@ def analysis(self) -> SpectrogramAnalysis | FailedItem | None: return self.__analysis def __init__(self, path: str): - # pylint: disable=broad-exception-caught self.__path = path self.__dataset = self.get_dataset(path=self.path) self.__all_analysis = self.get_all_analysis(path=self.path) @@ -59,7 +57,7 @@ def _get_dataset_for_path( def get_dataset(self, path: str | None = None) -> Dataset | FailedItem | None: """Returns dataset from storage""" - # pylint: disable=broad-exception-caught, assignment-from-none + # pylint: disable=assignment-from-none if path is None: return self.dataset diff --git a/backend/storage/resolvers/_legacy_osekit.py b/backend/storage/resolvers/_legacy_osekit.py index 8e163fe2e..c84a6effd 100644 --- a/backend/storage/resolvers/_legacy_osekit.py +++ b/backend/storage/resolvers/_legacy_osekit.py @@ -1,5 +1,4 @@ import csv -import traceback from ast import literal_eval from functools import reduce from pathlib import PureWindowsPath @@ -29,10 +28,8 @@ listdir, make_path_relative, make_static_url, - clean_path, ) from ._storage import StorageResolver -from ..exceptions import AnalysisNotFoundException class LegacyCSVDataset(TypedDict): @@ -186,7 +183,6 @@ def _get_dataset_for_path( def _get_all_analysis_for_dataset( self, dataset: Dataset, detailed: bool = False ) -> list[SpectrogramAnalysis | FailedItem]: - # pylint: disable=broad-exception-caught csv_datasets = self._get_csv_datasets(dataset.path) analysis: list[SpectrogramAnalysis | FailedItem] = [] for line in csv_datasets: @@ -207,6 +203,7 @@ def _get_all_analysis_for_dataset( def _get_analysis( self, dataset: Dataset, relative_path: str, detailed: bool = False ) -> SpectrogramAnalysis | FailedItem | None: + # pylint: disable=broad-exception-caught full_path = PureWindowsPath(join(dataset.path, relative_path)) 
name = f"{full_path.parent.name}/{full_path.name}" csv_datasets = self._get_csv_datasets( diff --git a/backend/storage/resolvers/_model.py b/backend/storage/resolvers/_model.py index f5b0e0698..cefa4780d 100644 --- a/backend/storage/resolvers/_model.py +++ b/backend/storage/resolvers/_model.py @@ -1,5 +1,3 @@ -from datetime import datetime - from backend.api.models import ( Dataset, SpectrogramAnalysis, diff --git a/backend/storage/resolvers/_osekit.py b/backend/storage/resolvers/_osekit.py index b43bd212a..806531bba 100644 --- a/backend/storage/resolvers/_osekit.py +++ b/backend/storage/resolvers/_osekit.py @@ -52,7 +52,7 @@ def _get_all_analysis_for_dataset( analysis: list[SpectrogramAnalysis] = [] with open_file(json_path) as f: d = json.loads(f.read()) - for name, info in d["datasets"].items(): + for _name, info in d["datasets"].items(): if info["class"] != SpectroDataset.__name__: continue analysis.append( @@ -79,7 +79,7 @@ def _get_analysis( if exists(json_path): with open_file(json_path) as f: d = json.loads(f.read()) - for name, info in d["datasets"].items(): + for _name, info in d["datasets"].items(): if info["class"] != SpectroDataset.__name__: continue path = make_path_relative( diff --git a/backend/utils/osekit_replace.py b/backend/utils/osekit_replace.py index 124838aec..2999989d8 100644 --- a/backend/utils/osekit_replace.py +++ b/backend/utils/osekit_replace.py @@ -107,7 +107,7 @@ def from_json(json_path: Path) -> "SpectroDataset": for spectro_name, spectro_data in dataset_data["data"].items(): audio_files = [] spectro_files = [] - for name, file in spectro_data["files"].items(): + for _name, file in spectro_data["files"].items(): spectro_files.append( TFile( path=join( @@ -130,7 +130,7 @@ def from_json(json_path: Path) -> "SpectroDataset": ), ) ) - for name, file in spectro_data["audio_data"]["files"].items(): + for _name, file in spectro_data["audio_data"]["files"].items(): audio_files.append( TFile( path=join( From 
b55f188fd28ef0556d5262f84ae8a955d2d24937 Mon Sep 17 00:00:00 2001 From: Elodie MORIN Date: Mon, 16 Mar 2026 20:26:28 +0100 Subject: [PATCH 6/6] test(frontend) --- frontend/tests/02-CampaignList.spec.ts | 14 +++++++++++++- frontend/tests/03-CampaignCreate.spec.ts | 4 ++-- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/frontend/tests/02-CampaignList.spec.ts b/frontend/tests/02-CampaignList.spec.ts index ed773c13d..38536bafd 100644 --- a/frontend/tests/02-CampaignList.spec.ts +++ b/frontend/tests/02-CampaignList.spec.ts @@ -139,6 +139,16 @@ const TEST = { }) }), + cannotAccessCampaignCreation: ({ as, tag }: Pick) => + test(`Cannot access campaign creation ${ as }`, { tag }, async ({ page }) => { + await interceptRequests(page, { getCurrentUser: as }) + await test.step(`Navigate`, () => page.campaigns.go({ as })); + + await test.step('Cannot access campaign creation', async () => { + await expect(page.campaigns.createCampaignButton).not.toBeVisible() + }) + }), + accessCampaignCreation: ({ as, tag }: Pick) => test(`Access campaign creation ${ as }`, { tag }, async ({ page }) => { await interceptRequests(page, { getCurrentUser: as }) @@ -160,6 +170,8 @@ test.describe('/annotation-campaign', () => { TEST.filterCampaigns({ as, tag: essentialTag }) - TEST.accessCampaignCreation({ as, tag: essentialTag }) + TEST.cannotAccessCampaignCreation({ as, tag: essentialTag }) + TEST.accessCampaignCreation({ as: 'staff', tag: essentialTag }) + TEST.accessCampaignCreation({ as: 'superuser', tag: essentialTag }) }) diff --git a/frontend/tests/03-CampaignCreate.spec.ts
b/frontend/tests/03-CampaignCreate.spec.ts index 20183fdae..b9d7c8873 100644 --- a/frontend/tests/03-CampaignCreate.spec.ts +++ b/frontend/tests/03-CampaignCreate.spec.ts @@ -142,7 +142,7 @@ const TEST = { await test.step('Cannot select a dataset if none is imported', async () => { await expect(page.getByText('You should first import dataset from Storage')).toBeVisible(); - await expect(page.getByRole('button', { name: 'Storage' })).toBeEnabled(); + await expect(page.getByRole('button', { name: 'Storage' }).last()).toBeEnabled(); }) await test.step('Cannot submit empty form', async () => { @@ -157,7 +157,7 @@ const TEST = { // Tests test.describe('/annotation-campaign/new', () => { - const as: UserType = 'annotator' + const as: UserType = 'staff' TEST.handleEmptyState({ as, tag: essentialTag })