diff --git a/.gitignore b/.gitignore index 49c44da06..8500d0681 100644 --- a/.gitignore +++ b/.gitignore @@ -13,6 +13,8 @@ dist dist-ssr *.local +.rustup/ + # Editor directories and files .vscode/* !.vscode/extensions.json diff --git a/scripts/cargo b/scripts/cargo new file mode 100755 index 000000000..1fbe6030f --- /dev/null +++ b/scripts/cargo @@ -0,0 +1,89 @@ +#!/usr/bin/env bash +set -euo pipefail + +SCRIPT_PATH="$(readlink -f "$0")" +SCRIPT_DIR="$(dirname "$SCRIPT_PATH")" + +find_real_cargo() { + local candidates=() + if [[ -n "${CARGO_REAL:-}" ]]; then + candidates+=("$CARGO_REAL") + fi + if [[ -n "${HOME:-}" ]]; then + candidates+=("$HOME/.cargo/bin/cargo") + fi + candidates+=("/usr/bin/cargo" "/usr/local/bin/cargo") + + for candidate in "${candidates[@]}"; do + if [[ -x "$candidate" ]]; then + local resolved_candidate + resolved_candidate="$(readlink -f "$candidate" 2>/dev/null || echo "$candidate")" + if [[ "$resolved_candidate" != "$SCRIPT_PATH" ]]; then + echo "$candidate" + return 0 + fi + fi + done + return 1 +} + +REAL_CARGO="$(find_real_cargo)" || { + echo "cargo wrapper: unable to locate a real cargo binary (searched ~/.cargo/bin, /usr/bin, /usr/local/bin)" >&2 + exit 1 +} + +subcommand="${1:-}" + +if [[ "$subcommand" != "metadata" ]]; then + exec "$REAL_CARGO" "$@" +fi + +tmpfile="$(mktemp)" +cleanup() { + rm -f "$tmpfile" +} +trap cleanup EXIT + +if ! "$REAL_CARGO" "$@" >"$tmpfile"; then + status=$? + cat "$tmpfile" + exit $status +fi + +manifest_path="" +argv=("$@") +for ((i = 0; i < ${#argv[@]}; i++)); do + if [[ "${argv[$i]}" == "--manifest-path" && $((i + 1)) -lt ${#argv[@]} ]]; then + manifest_path="${argv[$((i + 1))]}" + break + fi +done + +if [[ -n "$manifest_path" ]]; then + workspace_root="$(python - <<'PY' "$manifest_path" +import os, sys +print(os.path.dirname(os.path.abspath(sys.argv[1]))) +PY +)" +else + workspace_root="$(pwd)" +fi + +target_dir="${workspace_root}/target" + +python - <<'PY' "$tmpfile" "$workspace_root" "$target_dir" +import json +import sys +path, workspace, target = sys.argv[1:4] +with open(path, 'r', encoding='utf-8') as handle: + data = json.load(handle) + +if 'target_directory' not in data: + data['target_directory'] = target +if 'workspace_root' not in data: + data['workspace_root'] = workspace +if 'metadata' not in data: + data['metadata'] = {} + +json.dump(data, sys.stdout) +PY diff --git a/scripts/run-tauri.js b/scripts/run-tauri.js new file mode 100644 index 000000000..711f050c7 --- /dev/null +++ b/scripts/run-tauri.js @@ -0,0 +1,17 @@ +#!/usr/bin/env node +const { spawn } = require('node:child_process'); +const path = require('node:path'); + +const scriptsDir = path.resolve(__dirname); +const env = { ...process.env }; +env.PATH = `${scriptsDir}${path.delimiter}${env.PATH || ''}`; + +const child = spawn('tauri', ['dev'], { + stdio: 'inherit', + env, + shell: false, +}); + +child.on('exit', (code) => { + process.exit(code ?? 
1); +}); diff --git a/src-tauri/Cargo.lock b/src-tauri/Cargo.lock index 79d352523..01898ab6e 100644 --- a/src-tauri/Cargo.lock +++ b/src-tauri/Cargo.lock @@ -25,6 +25,7 @@ dependencies = [ "kamadak-exif", "little_exif", "log", + "lru", "memmap2", "mimalloc", "nalgebra 0.34.1", @@ -2464,6 +2465,8 @@ version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" dependencies = [ + "allocator-api2", + "equivalent", "foldhash 0.1.5", ] @@ -3517,6 +3520,15 @@ dependencies = [ "imgref", ] +[[package]] +name = "lru" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" +dependencies = [ + "hashbrown 0.15.5", +] + [[package]] name = "lru-slab" version = "0.1.2" diff --git a/src-tauri/Cargo.toml b/src-tauri/Cargo.toml index 066aa37a0..7b71d7c1c 100644 --- a/src-tauri/Cargo.toml +++ b/src-tauri/Cargo.toml @@ -61,6 +61,7 @@ io = "0.0.2" half = { version = "2.7.1", features = ["bytemuck"] } exr = "1.73.0" glam = "0.30.9" +lru = "0.12" [build-dependencies] tauri-build = { version = "2.4", features = [] } diff --git a/src-tauri/src/cache.rs b/src-tauri/src/cache.rs new file mode 100644 index 000000000..7ff231d8e --- /dev/null +++ b/src-tauri/src/cache.rs @@ -0,0 +1,420 @@ +//! Image processing cache with LRU eviction strategy. +//! +//! This module provides caching for expensive image processing operations that occur during +//! mask generation and AI patch compositing. By caching decoded masks and patches, we avoid +//! redundant base64 decoding, image loading, and resizing operations. +//! +//! ## Cache Design +//! +//! The cache uses an LRU (Least Recently Used) eviction policy with a configurable size limit +//! (default: 2GB). This provides a safety net against memory exhaustion while acting like an +//! unbounded cache for typical use cases. +//! +//! ## Relationship to Other Caches +//! +//! This cache is part of a multi-layer caching strategy: +//! +//! - `cached_preview` (in AppState): Stores the final transformed CPU-side preview image +//! - `gpu_image_cache` (in gpu_processing): Caches GPU texture uploads to avoid redundant transfers +//! - `processing_cache` (this module): Caches intermediate decoded masks and AI patches +//! - `gpu_processor` (in AppState): Reusable GPU pipeline/shader resources (global scope) +//! +//! Each cache serves a distinct purpose in the rendering pipeline and does not duplicate functionality. + +use image::{DynamicImage, GrayImage, Rgb32FImage}; +use lru::LruCache; +use std::hash::Hash; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::Arc; + +/// Cache key for identifying cached image processing results. +/// +/// Each variant includes the image path and processing parameters to ensure cache correctness. +/// The key must uniquely identify the cached data - any change in parameters that affects +/// the output must be reflected in the key. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub enum CacheKey { + /// A decoded mask from base64 data, specific to an image and transform state. + /// + /// Masks are decoded from base64 strings and can be transformed (rotated, flipped, etc). + /// The transform_hash ensures we cache different versions for different transform states. 
+ DecodedMask { + image_path: String, + mask_id: String, + width: u32, + height: u32, + transform_hash: u64, + }, + /// A decoded AI patch (generative replace result) specific to an image. + /// + /// Patches are decoded from base64 RGB32F data and resized to match the target dimensions. + /// content_hash ensures we invalidate when the patch is regenerated with the same ID. + DecodedPatch { + image_path: String, + patch_id: String, + width: u32, + height: u32, + content_hash: u64, + }, + /// A blended mask result combining multiple masks for an image. + /// + /// When multiple masks are active, they are blended together. The masks_hash uniquely + /// identifies the combination and ordering of masks to ensure cache correctness. + BlendedMask { + image_path: String, + masks_hash: u64, + width: u32, + height: u32, + }, + /// A full-resolution transformed image (after orientation, rotation, crop). + /// + /// Caches the CPU-side transform pipeline output to avoid re-running expensive operations + /// when only tone adjustments change. The composite_hash includes both transform state and + /// context (Preview/OriginalPreview/Fullscreen/Export) to prevent contamination. + TransformedImage { + image_path: String, + composite_hash: u64, + width: u32, + height: u32, + }, +} + +/// Cached image processing data with size tracking. +/// +/// Each variant stores an Arc to the image data for efficient cloning, along with the +/// byte size for cache eviction calculations. +pub enum CachedData { + Mask(Arc, usize), + Patch(Arc, usize), + BlendedMask(Arc, usize), + TransformedImage(Arc, (f32, f32), usize), +} + +impl CachedData { + pub fn byte_size(&self) -> usize { + match self { + CachedData::Mask(_, size) => *size, + CachedData::Patch(_, size) => *size, + CachedData::BlendedMask(_, size) => *size, + CachedData::TransformedImage(_, _, size) => *size, + } + } + + pub fn as_mask(&self) -> Option> { + match self { + CachedData::Mask(img, _) | CachedData::BlendedMask(img, _) => Some(Arc::clone(img)), + _ => None, + } + } + + pub fn as_patch(&self) -> Option> { + match self { + CachedData::Patch(img, _) => Some(Arc::clone(img)), + _ => None, + } + } + + pub fn as_transformed_image(&self) -> Option<(Arc, (f32, f32))> { + match self { + CachedData::TransformedImage(img, offset, _) => Some((Arc::clone(img), *offset)), + _ => None, + } + } +} + +/// Statistics for cache performance monitoring. +/// +/// Provides metrics for understanding cache effectiveness and memory usage. +/// Use `hit_rate()` and `utilization()` for derived metrics. +#[derive(Debug, Clone, Copy, serde::Serialize)] +#[serde(rename_all = "camelCase")] +pub struct CacheStats { + pub hits: usize, + pub misses: usize, + pub evictions: usize, + pub current_size_bytes: usize, + pub max_size_bytes: usize, + pub entry_count: usize, +} + +impl CacheStats { + pub fn hit_rate(&self) -> f64 { + let total = self.hits + self.misses; + if total == 0 { + 0.0 + } else { + self.hits as f64 / total as f64 + } + } + + pub fn utilization(&self) -> f64 { + if self.max_size_bytes == 0 { + 0.0 + } else { + self.current_size_bytes as f64 / self.max_size_bytes as f64 + } + } +} + +/// LRU cache for intermediate image processing results. +/// +/// Caches decoded masks and AI patches to avoid redundant base64 decoding, image loading, +/// and resizing operations. The cache tracks memory usage and evicts least-recently-used +/// entries when the size limit is exceeded. 
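+///
+/// ## Example
+///
+/// A minimal usage sketch (the path, mask id, and sizes below are illustrative, not taken
+/// from a real call site):
+///
+/// ```
+/// use std::sync::Arc;
+/// use image::GrayImage;
+///
+/// // 64 MB limit for illustration; the application default is 2 GB.
+/// let mut cache = ImageProcessingCache::new(64 * 1024 * 1024);
+///
+/// let key = CacheKey::DecodedMask {
+///     image_path: "photo.raw".to_string(),
+///     mask_id: "mask-1".to_string(),
+///     width: 512,
+///     height: 512,
+///     transform_hash: 0,
+/// };
+///
+/// let mask = Arc::new(GrayImage::new(512, 512));
+/// cache.insert(key.clone(), CachedData::Mask(mask, calculate_mask_size(512, 512)));
+///
+/// // A second lookup with the same key is a hit and refreshes its LRU position.
+/// assert!(cache.get(&key).is_some());
+/// assert_eq!(cache.stats().hits, 1);
+/// ```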
+/// +/// ## Thread Safety +/// +/// This cache is designed to be wrapped in a `Mutex` in the AppState. While individual +/// operations use atomic counters for statistics, the LRU cache itself requires exclusive +/// access for get/insert operations. +/// +/// ## Memory Management +/// +/// The cache enforces a strict size limit (default: 2GB) by evicting LRU entries before +/// inserting new data that would exceed the limit. Each entry tracks its byte size for +/// accurate memory accounting. +pub struct ImageProcessingCache { + cache: LruCache, + current_size: AtomicUsize, + max_size: usize, + hits: AtomicUsize, + misses: AtomicUsize, + evictions: AtomicUsize, +} + +impl ImageProcessingCache { + pub fn new(max_size_bytes: usize) -> Self { + Self { + cache: LruCache::unbounded(), + current_size: AtomicUsize::new(0), + max_size: max_size_bytes, + hits: AtomicUsize::new(0), + misses: AtomicUsize::new(0), + evictions: AtomicUsize::new(0), + } + } + + pub fn get_transformed_image(&mut self, image_path: &str, composite_hash: u64) -> Option { + // Find TransformedImage by path and composite_hash, ignoring dimensions + let key = self.cache.iter().find_map(|(k, _)| { + if let CacheKey::TransformedImage { image_path: path, composite_hash: hash, .. } = k { + if path == image_path && *hash == composite_hash { + return Some(k.clone()); + } + } + None + })?; + + self.get(&key) + } + + pub fn get(&mut self, key: &CacheKey) -> Option { + let key_summary = self.cache_key_summary(key); + + if let Some(data) = self.cache.get(key) { + self.hits.fetch_add(1, Ordering::Relaxed); + let hit_count = self.hits.load(Ordering::Relaxed); + log::info!("Cache: hit (total hits: {}) - {}", hit_count, key_summary); + Some(match data { + CachedData::Mask(img, size) => CachedData::Mask(Arc::clone(img), *size), + CachedData::Patch(img, size) => CachedData::Patch(Arc::clone(img), *size), + CachedData::BlendedMask(img, size) => { + CachedData::BlendedMask(Arc::clone(img), *size) + } + CachedData::TransformedImage(img, offset, size) => { + CachedData::TransformedImage(Arc::clone(img), *offset, *size) + } + }) + } else { + self.misses.fetch_add(1, Ordering::Relaxed); + let miss_count = self.misses.load(Ordering::Relaxed); + log::info!("Cache: miss (total misses: {}) - {}", miss_count, key_summary); + None + } + } + + pub fn insert(&mut self, key: CacheKey, data: CachedData) { + let data_size = data.byte_size(); + let data_size_mb = data_size as f64 / (1024.0 * 1024.0); + let key_summary = self.cache_key_summary(&key); + + // Evict LRU entries if needed + let mut evicted_count = 0; + while self.current_size.load(Ordering::Relaxed) + data_size > self.max_size { + if let Some((_, evicted)) = self.cache.pop_lru() { + self.current_size + .fetch_sub(evicted.byte_size(), Ordering::Relaxed); + self.evictions.fetch_add(1, Ordering::Relaxed); + evicted_count += 1; + } else { + break; + } + } + + if evicted_count > 0 { + log::warn!("Cache: evicted {} LRU entries to make room ({:.2} MB needed)", evicted_count, data_size_mb); + } + + if let Some((_old_key, old_data)) = self.cache.push(key, data) { + self.current_size + .fetch_sub(old_data.byte_size(), Ordering::Relaxed); + log::debug!("Cache: replaced existing entry"); + } + self.current_size.fetch_add(data_size, Ordering::Relaxed); + + let total_mb = self.current_size.load(Ordering::Relaxed) as f64 / (1024.0 * 1024.0); + let max_mb = self.max_size as f64 / (1024.0 * 1024.0); + log::info!("Cache: insert {} ({:.2} MB) | total: {:.1}/{:.0} MB ({} entries)", + key_summary, data_size_mb, 
total_mb, max_mb, self.cache.len()); + } + + pub fn clear(&mut self) { + let entry_count = self.cache.len(); + let size_mb = self.current_size.load(Ordering::Relaxed) as f64 / (1024.0 * 1024.0); + self.cache.clear(); + self.current_size.store(0, Ordering::Relaxed); + log::info!("Cache: cleared - removed {} entries ({:.1} MB)", entry_count, size_mb); + } + + pub fn set_max_size(&mut self, max_size_bytes: usize) { + self.max_size = max_size_bytes; + + while self.current_size.load(Ordering::Relaxed) > self.max_size { + if let Some((_, evicted)) = self.cache.pop_lru() { + self.current_size + .fetch_sub(evicted.byte_size(), Ordering::Relaxed); + self.evictions.fetch_add(1, Ordering::Relaxed); + } else { + break; + } + } + } + + pub fn stats(&self) -> CacheStats { + CacheStats { + hits: self.hits.load(Ordering::Relaxed), + misses: self.misses.load(Ordering::Relaxed), + evictions: self.evictions.load(Ordering::Relaxed), + current_size_bytes: self.current_size.load(Ordering::Relaxed), + max_size_bytes: self.max_size, + entry_count: self.cache.len(), + } + } + + fn cache_key_summary(&self, key: &CacheKey) -> String { + match key { + CacheKey::DecodedMask { mask_id, width, height, .. } => { + format!("Mask(id: {}, {}x{})", mask_id, width, height) + } + CacheKey::DecodedPatch { patch_id, width, height, .. } => { + format!("Patch(id: {}, {}x{})", patch_id, width, height) + } + CacheKey::BlendedMask { width, height, .. } => { + format!("BlendedMask({}x{})", width, height) + } + CacheKey::TransformedImage { image_path, composite_hash, width, height } => { + let filename = std::path::Path::new(image_path) + .file_name() + .and_then(|n| n.to_str()) + .unwrap_or(image_path); + format!("TransformedImage({}, {:016x}, {}x{})", filename, composite_hash, width, height) + } + } + } + + pub fn invalidate_image(&mut self, image_path: &str) { + let keys_to_remove: Vec = self + .cache + .iter() + .filter_map(|(key, _)| { + let matches = match key { + CacheKey::DecodedMask { image_path: path, .. } => path == image_path, + CacheKey::DecodedPatch { image_path: path, .. } => path == image_path, + CacheKey::BlendedMask { image_path: path, .. } => path == image_path, + CacheKey::TransformedImage { image_path: path, .. 
} => path == image_path, + }; + if matches { + Some(key.clone()) + } else { + None + } + }) + .collect(); + + let removed_count = keys_to_remove.len(); + for key in keys_to_remove { + if let Some(data) = self.cache.pop(&key) { + self.current_size + .fetch_sub(data.byte_size(), Ordering::Relaxed); + } + } + + if removed_count > 0 { + log::debug!("Cache: invalidated {} entries for image: {}", removed_count, image_path); + } + } +} + +pub fn calculate_mask_size(width: u32, height: u32) -> usize { + (width * height) as usize +} + +pub fn calculate_patch_size(width: u32, height: u32) -> usize { + (width * height * 12) as usize +} + +pub fn calculate_transformed_image_size(width: u32, height: u32) -> usize { + // DynamicImage from apply_all_transformations is typically Rgb32F (3 channels * 4 bytes) + // Use conservative estimate of 12 bytes per pixel for proper LRU eviction + (width * height * 12) as usize +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_cache_basic_operations() { + let mut cache = ImageProcessingCache::new(1000); + + let key = CacheKey::DecodedMask { + image_path: "test.raw".to_string(), + mask_id: "mask1".to_string(), + width: 10, + height: 10, + transform_hash: 0, + }; + + let mask = Arc::new(GrayImage::new(10, 10)); + let data = CachedData::Mask(mask, 100); + + cache.insert(key.clone(), data); + assert!(cache.get(&key).is_some()); + + let stats = cache.stats(); + assert_eq!(stats.hits, 1); + assert_eq!(stats.misses, 0); + } + + #[test] + fn test_cache_eviction() { + let mut cache = ImageProcessingCache::new(150); + + for i in 0..3 { + let key = CacheKey::DecodedMask { + image_path: format!("test{}.raw", i), + mask_id: "mask1".to_string(), + width: 10, + height: 10, + transform_hash: 0, + }; + let mask = Arc::new(GrayImage::new(10, 10)); + let data = CachedData::Mask(mask, 100); + cache.insert(key, data); + } + + let stats = cache.stats(); + assert!(stats.evictions > 0); + assert!(stats.current_size_bytes <= 150); + } +} diff --git a/src-tauri/src/file_management.rs b/src-tauri/src/file_management.rs index 21c86cd45..8b7ab0f60 100644 --- a/src-tauri/src/file_management.rs +++ b/src-tauri/src/file_management.rs @@ -260,6 +260,9 @@ pub struct AppSettings { pub processing_backend: Option, #[serde(default)] pub linux_gpu_optimization: Option, + /// Cache size limit in megabytes (default: 2048 MB = 2 GB) + #[serde(default)] + pub cache_max_size_mb: Option, } fn default_adjustment_visibility() -> HashMap { @@ -309,6 +312,7 @@ impl Default for AppSettings { linux_gpu_optimization: Some(true), #[cfg(not(target_os = "linux"))] linux_gpu_optimization: Some(false), + cache_max_size_mb: Some(2048), } } } @@ -564,7 +568,8 @@ pub fn generate_thumbnail_data( let highlight_compression = settings.raw_highlight_compression.unwrap_or(2.5); let composite_image = if let Some(img) = preloaded_image { - image_loader::composite_patches_on_image(img, &adjustments)? + let state = app_handle.state::(); + image_loader::composite_patches_on_image(img, &adjustments, Some(&state), path_str)? } else { match read_file_mapped(Path::new(path_str)) { Ok(mmap) => image_loader::load_and_composite( diff --git a/src-tauri/src/gpu_processing.rs b/src-tauri/src/gpu_processing.rs index 7b547962f..2ec832bbd 100644 --- a/src-tauri/src/gpu_processing.rs +++ b/src-tauri/src/gpu_processing.rs @@ -138,8 +138,15 @@ struct BlurParams { _pad: u32, } -struct GpuProcessor<'a> { - context: &'a GpuContext, +/// Reusable GPU processing resources. 
+/// +/// Contains pipelines, shaders, bind group layouts, and buffers that are expensive to create +/// but can be reused across multiple images. This struct is designed to be cached globally +/// in AppState to avoid recreating GPU resources on every render. +/// +/// Image-specific resources (mask textures, LUT textures) are created on-demand per render. +pub struct GpuProcessor { + context: Arc, blur_bgl: wgpu::BindGroupLayout, h_blur_pipeline: wgpu::ComputePipeline, v_blur_pipeline: wgpu::ComputePipeline, @@ -149,21 +156,17 @@ struct GpuProcessor<'a> { adjustments_buffer: wgpu::Buffer, dummy_blur_view: wgpu::TextureView, dummy_mask_view: wgpu::TextureView, - lut_texture_view: wgpu::TextureView, - lut_sampler: wgpu::Sampler, - mask_views: Vec, + dummy_lut_view: wgpu::TextureView, + dummy_lut_sampler: wgpu::Sampler, } -impl<'a> GpuProcessor<'a> { - fn new( - context: &'a GpuContext, - width: u32, - height: u32, - mask_bitmaps: &[ImageBuffer, Vec>], - lut: Option>, - ) -> Result { +impl GpuProcessor { + /// Creates a new GpuProcessor with reusable GPU resources. + /// + /// This is expensive and should only be called once, then cached in AppState. + /// The created pipelines, shaders, and layouts can be reused across all images. + pub fn new(context: Arc) -> Result { let device = &context.device; - let queue = &context.queue; const MAX_MASKS: u32 = 11; let blur_shader_module = device.create_shader_module(wgpu::ShaderModuleDescriptor { @@ -391,6 +394,41 @@ impl<'a> GpuProcessor<'a> { let dummy_lut_view = dummy_lut_texture.create_view(&Default::default()); let dummy_lut_sampler = device.create_sampler(&wgpu::SamplerDescriptor::default()); + Ok(Self { + context, + blur_bgl, + h_blur_pipeline, + v_blur_pipeline, + blur_params_buffer, + main_bgl, + main_pipeline, + adjustments_buffer, + dummy_blur_view, + dummy_mask_view, + dummy_lut_view, + dummy_lut_sampler, + }) + } + + /// Runs the GPU processing pipeline with the given parameters. + /// + /// Creates image-specific textures (masks, LUT) on-demand for this render. + /// The expensive pipeline/shader resources are reused from the cached GpuProcessor. 
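+    ///
+    /// A sketch of the intended usage (variable names here are illustrative; see
+    /// `process_and_get_dynamic_image` for the real call site): create the processor once,
+    /// cache it in `AppState`, and reuse it for every render through `run_gpu_processing`.
+    ///
+    /// ```
+    /// let processor = GpuProcessor::new(Arc::new(context.clone()))?;
+    /// let pixels = run_gpu_processing(&processor, &input_texture_view, width, height,
+    ///                                 adjustments, &mask_bitmaps, lut)?;
+    /// ```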
+ fn run( + &self, + input_texture_view: &wgpu::TextureView, + width: u32, + height: u32, + adjustments: AllAdjustments, + mask_bitmaps: &[ImageBuffer, Vec>], + lut: Option>, + ) -> Result, String> { + let device = &self.context.device; + let queue = &self.context.queue; + let scale = (width.min(height) as f32) / 1080.0; + const MAX_MASKS: u32 = 11; + + // Create image-specific mask textures let full_texture_size = wgpu::Extent3d { width, height, @@ -402,7 +440,7 @@ impl<'a> GpuProcessor<'a> { let mask_texture = device.create_texture_with_data( queue, &wgpu::TextureDescriptor { - label: Some("Full Mask Texture"), + label: Some("Mask Texture"), size: full_texture_size, mip_level_count: 1, sample_count: 1, @@ -418,6 +456,7 @@ impl<'a> GpuProcessor<'a> { }) .collect(); + // Create image-specific LUT texture if provided let (lut_texture_view, lut_sampler) = if let Some(lut_arc) = &lut { let lut_data = &lut_arc.data; let size = lut_arc.size; @@ -458,38 +497,9 @@ impl<'a> GpuProcessor<'a> { }); (view, sampler) } else { - (dummy_lut_view.clone(), dummy_lut_sampler) + (self.dummy_lut_view.clone(), self.dummy_lut_sampler.clone()) }; - Ok(Self { - context, - blur_bgl, - h_blur_pipeline, - v_blur_pipeline, - blur_params_buffer, - main_bgl, - main_pipeline, - adjustments_buffer, - dummy_blur_view, - dummy_mask_view, - lut_texture_view, - lut_sampler, - mask_views, - }) - } - - fn run( - &self, - input_texture_view: &wgpu::TextureView, - width: u32, - height: u32, - adjustments: AllAdjustments, - ) -> Result, String> { - let device = &self.context.device; - let queue = &self.context.queue; - let scale = (width.min(height) as f32) / 1080.0; - const MAX_MASKS: u32 = 11; - const TILE_SIZE: u32 = 2048; const TILE_OVERLAP: u32 = 128; let max_tile_input_dim = TILE_SIZE + 2 * TILE_OVERLAP; @@ -674,7 +684,7 @@ impl<'a> GpuProcessor<'a> { }, ]; for i in 0..MAX_MASKS as usize { - let view = self.mask_views.get(i).unwrap_or(&self.dummy_mask_view); + let view = mask_views.get(i).unwrap_or(&self.dummy_mask_view); bind_group_entries.push(wgpu::BindGroupEntry { binding: 3 + i as u32, resource: wgpu::BindingResource::TextureView(view), @@ -682,11 +692,11 @@ impl<'a> GpuProcessor<'a> { } bind_group_entries.push(wgpu::BindGroupEntry { binding: 3 + MAX_MASKS, - resource: wgpu::BindingResource::TextureView(&self.lut_texture_view), + resource: wgpu::BindingResource::TextureView(&lut_texture_view), }); bind_group_entries.push(wgpu::BindGroupEntry { binding: 4 + MAX_MASKS, - resource: wgpu::BindingResource::Sampler(&self.lut_sampler), + resource: wgpu::BindingResource::Sampler(&lut_sampler), }); bind_group_entries.push(wgpu::BindGroupEntry { binding: 5 + MAX_MASKS, @@ -757,7 +767,7 @@ impl<'a> GpuProcessor<'a> { } pub fn run_gpu_processing( - context: &GpuContext, + processor: &GpuProcessor, input_texture_view: &wgpu::TextureView, width: u32, height: u32, @@ -766,7 +776,7 @@ pub fn run_gpu_processing( lut: Option>, ) -> Result, String> { let start_time = Instant::now(); - let max_dim = context.limits.max_texture_dimension_2d; + let max_dim = processor.context.limits.max_texture_dimension_2d; if width > max_dim || height > max_dim { return Err(format!( @@ -775,8 +785,7 @@ pub fn run_gpu_processing( )); } - let processor = GpuProcessor::new(context, width, height, mask_bitmaps, lut)?; - let final_pixels = processor.run(input_texture_view, width, height, adjustments)?; + let final_pixels = processor.run(input_texture_view, width, height, adjustments, mask_bitmaps, lut)?; let duration = start_time.elapsed(); log::info!( @@ 
-863,8 +872,19 @@ pub fn process_and_get_dynamic_image( let cache = cache_lock.as_ref().unwrap(); + // Get or create the cached GpuProcessor + let mut processor_lock = state.gpu_processor.lock().unwrap(); + if processor_lock.is_none() { + log::info!("Creating new GpuProcessor (will be cached for future renders)"); + let processor = GpuProcessor::new(Arc::new(context.clone())) + .map_err(|e| format!("Failed to create GpuProcessor: {}", e))?; + *processor_lock = Some(Arc::new(processor)); + } + let processor = processor_lock.as_ref().unwrap().clone(); + drop(processor_lock); + let processed_pixels = run_gpu_processing( - context, + &processor, &cache.texture_view, cache.width, cache.height, diff --git a/src-tauri/src/image_loader.rs b/src-tauri/src/image_loader.rs index 0f102947e..91bc341e7 100644 --- a/src-tauri/src/image_loader.rs +++ b/src-tauri/src/image_loader.rs @@ -3,12 +3,13 @@ use crate::formats::is_raw_file; use crate::image_processing::apply_orientation; use crate::mask_generation::{MaskDefinition, SubMask, generate_mask_bitmap}; use crate::raw_processing::develop_raw_image; +use std::sync::Arc; use anyhow::{anyhow, Context, Result}; use base64::{Engine as _, engine::general_purpose}; use exif::{Reader as ExifReader, Tag}; use exr::prelude::*; use exr::image::pixel_vec::PixelVec; -use image::{DynamicImage, GenericImageView, ImageReader, imageops}; +use image::{DynamicImage, GenericImageView, ImageReader, Rgb32FImage, imageops}; use rawler::Orientation; use rayon::prelude::*; use serde::Deserialize; @@ -35,7 +36,7 @@ pub fn load_and_composite( ) -> Result { let base_image = load_base_image_from_bytes(base_image, path, use_fast_raw_dev, highlight_compression)?; - composite_patches_on_image(&base_image, adjustments) + composite_patches_on_image(&base_image, adjustments, None, path) } fn load_exr_from_bytes(bytes: &[u8]) -> Result { @@ -128,9 +129,28 @@ pub fn load_image_with_orientation(bytes: &[u8]) -> Result { Ok(DynamicImage::ImageRgb32F(oriented_image.to_rgb32f())) } +/// Decodes a base64-encoded patch image and resizes it to the target dimensions. 
+fn decode_and_resize_patch(color_b64: &str, target_w: u32, target_h: u32) -> Result { + let color_bytes = general_purpose::STANDARD.decode(color_b64)?; + let color_image_u8 = image::load_from_memory(&color_bytes)?.to_rgb8(); + + let (patch_w, patch_h) = color_image_u8.dimensions(); + let color_image_f32 = if target_w != patch_w || target_h != patch_h { + let resized = + imageops::resize(&color_image_u8, target_w, target_h, imageops::FilterType::Lanczos3); + DynamicImage::ImageRgb8(resized).to_rgb32f() + } else { + DynamicImage::ImageRgb8(color_image_u8).to_rgb32f() + }; + + Ok(color_image_f32) +} + pub fn composite_patches_on_image( base_image: &DynamicImage, current_adjustments: &Value, + state: Option<&tauri::State>, + image_path: &str, ) -> Result { let patches_val = match current_adjustments.get("aiPatches") { Some(val) => val, @@ -189,16 +209,52 @@ pub fn composite_patches_on_image( .get("color") .and_then(|v| v.as_str()) .context("Missing color data")?; - let color_bytes = general_purpose::STANDARD.decode(color_b64)?; - let color_image_u8 = image::load_from_memory(&color_bytes)?.to_rgb8(); - - let (patch_w, patch_h) = color_image_u8.dimensions(); - let color_image_f32 = if base_w != patch_w || base_h != patch_h { - let resized = - imageops::resize(&color_image_u8, base_w, base_h, imageops::FilterType::Lanczos3); - DynamicImage::ImageRgb8(resized).to_rgb32f() + + // Try to get decoded patch from cache + let color_image_f32 = if let Some(state) = state { + // Hash the patch content to detect when it changes (regeneration/edits) + use std::hash::{Hash, Hasher}; + use std::collections::hash_map::DefaultHasher; + let mut hasher = DefaultHasher::new(); + color_b64.hash(&mut hasher); + let content_hash = hasher.finish(); + + let cache_key = crate::cache::CacheKey::DecodedPatch { + image_path: image_path.to_string(), + patch_id: mask_def.id.clone(), + width: base_w, + height: base_h, + content_hash, + }; + + let mut cache_lock = state.processing_cache.lock().unwrap(); + if let Some(cached_data) = cache_lock.get(&cache_key) { + if let Some(patch_image) = cached_data.as_patch() { + drop(cache_lock); + (*patch_image).clone() + } else { + drop(cache_lock); + // Cache miss - decode and resize + let decoded = decode_and_resize_patch(color_b64, base_w, base_h)?; + let byte_size = crate::cache::calculate_patch_size(base_w, base_h); + let cached_data = crate::cache::CachedData::Patch(Arc::new(decoded.clone()), byte_size); + let mut cache_lock = state.processing_cache.lock().unwrap(); + cache_lock.insert(cache_key, cached_data); + decoded + } + } else { + drop(cache_lock); + // Cache miss - decode and resize + let decoded = decode_and_resize_patch(color_b64, base_w, base_h)?; + let byte_size = crate::cache::calculate_patch_size(base_w, base_h); + let cached_data = crate::cache::CachedData::Patch(Arc::new(decoded.clone()), byte_size); + let mut cache_lock = state.processing_cache.lock().unwrap(); + cache_lock.insert(cache_key, cached_data); + decoded + } } else { - DynamicImage::ImageRgb8(color_image_u8).to_rgb32f() + // No cache available - decode directly + decode_and_resize_patch(color_b64, base_w, base_h)? 
}; composited_rgba diff --git a/src-tauri/src/main.rs b/src-tauri/src/main.rs index 88b65b8d0..370dbb641 100644 --- a/src-tauri/src/main.rs +++ b/src-tauri/src/main.rs @@ -5,6 +5,7 @@ use mimalloc::MiMalloc; static GLOBAL: MiMalloc = MiMalloc; mod ai_processing; +mod cache; mod comfyui_connector; mod culling; mod file_management; @@ -100,9 +101,20 @@ pub struct GpuImageCache { pub struct AppState { original_image: Mutex>, + /// Final transformed CPU-side preview image. Stores the complete rendered preview after + /// all adjustments, masks, and patches have been applied. cached_preview: Mutex>, gpu_context: Mutex>, + /// GPU texture upload cache. Prevents redundant CPU-to-GPU memory transfers by caching + /// uploaded textures. Indexed by transform hash to invalidate when transforms change. gpu_image_cache: Mutex>, + /// Reusable GPU pipeline and shader resources. Avoids recreating WGPU pipelines, shaders, + /// and bind groups on every render call. Global scope - shared across all images. + gpu_processor: Mutex>>, + /// LRU cache for decoded masks, AI patches, and transformed images. Caches intermediate + /// processing results to avoid redundant base64 decoding, image loading, resizing, and + /// CPU-side transforms. Multi-image LRU with 2GB default size limit. + processing_cache: Mutex, ai_state: Mutex>, ai_init_lock: TokioMutex<()>, export_task_handle: Mutex>>, @@ -254,26 +266,19 @@ fn calculate_transform_hash(adjustments: &serde_json::Value) -> u64 { is_visible.hash(&mut hasher); if let Some(patch_data) = patch.get("patchData") { - let color_len = patch_data - .get("color") - .and_then(|v| v.as_str()) - .unwrap_or("") - .len(); - color_len.hash(&mut hasher); - - let mask_len = patch_data - .get("mask") - .and_then(|v| v.as_str()) - .unwrap_or("") - .len(); - mask_len.hash(&mut hasher); + // Hash actual content, not just length, to detect regenerated patches + if let Some(color_str) = patch_data.get("color").and_then(|v| v.as_str()) { + color_str.hash(&mut hasher); + } + + if let Some(mask_str) = patch_data.get("mask").and_then(|v| v.as_str()) { + mask_str.hash(&mut hasher); + } } else { - let data_len = patch - .get("patchDataBase64") - .and_then(|v| v.as_str()) - .unwrap_or("") - .len(); - data_len.hash(&mut hasher); + // Hash actual content, not just length + if let Some(data_str) = patch.get("patchDataBase64").and_then(|v| v.as_str()) { + data_str.hash(&mut hasher); + } } if let Some(sub_masks_val) = patch.get("subMasks") { @@ -292,6 +297,70 @@ fn calculate_transform_hash(adjustments: &serde_json::Value) -> u64 { hasher.finish() } +/// Gets the transformed image either from cache or by computing it. +/// +/// This function encapsulates the caching logic for CPU-side transforms (orientation, rotation, crop). +/// It checks if a cached result exists with a matching transform_hash and context, and if not, calls +/// the provided compute function to generate the transformed image and caches the result in the +/// processing_cache LRU. +/// +/// The context string prevents cache contamination between different rendering paths (e.g., "Preview" +/// with patches vs "OriginalPreview" with RAW defaults). 
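+///
+/// A sketch of a call site (mirroring the "Export" path below; `image_path`, `base_image`,
+/// and `js_adjustments` are illustrative names):
+///
+/// ```
+/// let (transformed, crop_offset) = get_or_compute_transformed_image(
+///     &state,
+///     image_path,
+///     calculate_transform_hash(&js_adjustments),
+///     "Export",
+///     || Ok(apply_all_transformations(&base_image, &js_adjustments)),
+/// )?;
+/// ```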
+fn get_or_compute_transformed_image( + state: &tauri::State, + image_path: &str, + transform_hash: u64, + context_name: &str, + compute_fn: F, +) -> Result<(DynamicImage, (f32, f32)), String> +where + F: FnOnce() -> Result<(DynamicImage, (f32, f32)), String>, +{ + // Create composite cache key from transform_hash + context to prevent contamination + let mut hasher = DefaultHasher::new(); + transform_hash.hash(&mut hasher); + context_name.hash(&mut hasher); + let composite_hash = hasher.finish(); + + log::debug!("Cache: {} - checking transform cache (composite_hash: {:016x}, transform_hash: {:016x})", + context_name, composite_hash, transform_hash); + + // Check processing_cache for existing transformed image + let mut cache_lock = state.processing_cache.lock().unwrap(); + if let Some(cached_data) = cache_lock.get_transformed_image(image_path, composite_hash) { + if let Some((img, offset)) = cached_data.as_transformed_image() { + drop(cache_lock); + return Ok(((*img).clone(), offset)); + } + } + drop(cache_lock); + + // Cache miss - compute the transformed image + log::debug!("Cache: {} - computing transformed image...", context_name); + let (transformed, offset) = compute_fn()?; + + // Cache the result + let (width, height) = transformed.dimensions(); + let byte_size = cache::calculate_transformed_image_size(width, height); + let cache_key = cache::CacheKey::TransformedImage { + image_path: image_path.to_string(), + composite_hash, + width, + height, + }; + let cached_data = cache::CachedData::TransformedImage( + Arc::new(transformed.clone()), + offset, + byte_size, + ); + + let mut cache_lock = state.processing_cache.lock().unwrap(); + cache_lock.insert(cache_key, cached_data); + drop(cache_lock); + + Ok((transformed, offset)) +} + fn calculate_full_job_hash(path: &str, adjustments: &serde_json::Value) -> u64 { let mut hasher = DefaultHasher::new(); path.hash(&mut hasher); @@ -304,11 +373,20 @@ fn generate_transformed_preview( adjustments: &serde_json::Value, app_handle: &tauri::AppHandle, ) -> Result<(DynamicImage, f32, (f32, f32)), String> { - let patched_original_image = composite_patches_on_image(&loaded_image.image, adjustments) - .map_err(|e| format!("Failed to composite AI patches: {}", e))?; + let state = app_handle.state::(); + let transform_hash = calculate_transform_hash(adjustments); - let (transformed_full_res, unscaled_crop_offset) = - apply_all_transformations(&patched_original_image, adjustments); + let (transformed_full_res, unscaled_crop_offset) = get_or_compute_transformed_image( + &state, + &loaded_image.path, + transform_hash, + "Preview", + || { + let patched_original_image = composite_patches_on_image(&loaded_image.image, adjustments, Some(&state), &loaded_image.path) + .map_err(|e| format!("Failed to composite AI patches: {}", e))?; + Ok(apply_all_transformations(&patched_original_image, adjustments)) + }, + )?; let settings = load_settings(app_handle.clone()).unwrap_or_default(); let final_preview_dim = settings.editor_preview_resolution.unwrap_or(1920); @@ -426,8 +504,13 @@ async fn load_image( let (orig_width, orig_height) = pristine_img.dimensions(); let is_raw = is_raw_file(&path); + log::info!("Cache: loading image {}", path); *state.cached_preview.lock().unwrap() = None; *state.gpu_image_cache.lock().unwrap() = None; + + // Note: processing_cache (masks, patches, transforms) persists across image loads + // for fast browsing. LRU eviction handles memory limits automatically. 
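+    // Optional (not done in this build): entries for a single image could still be dropped
+    // eagerly here via `state.processing_cache.lock().unwrap().invalidate_image(&path);`.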
+ *state.original_image.lock().unwrap() = Some(LoadedImage { path: path.clone(), image: pristine_img, @@ -587,16 +670,54 @@ fn apply_adjustments( unscaled_crop_offset.1 * scale_for_gpu, ); + // Generate or retrieve cached mask bitmaps let mask_bitmaps: Vec, Vec>> = mask_definitions .iter() .filter_map(|def| { - generate_mask_bitmap( + // Create cache key from mask definition AND viewport state (scale, crop offset) + let mask_json = serde_json::to_string(def).ok()?; + let mut hasher = std::collections::hash_map::DefaultHasher::new(); + mask_json.hash(&mut hasher); + // Include viewport state in hash since it affects the final bitmap + scale_for_gpu.to_bits().hash(&mut hasher); + scaled_crop_offset.0.to_bits().hash(&mut hasher); + scaled_crop_offset.1.to_bits().hash(&mut hasher); + let transform_hash = hasher.finish(); + + let cache_key = cache::CacheKey::DecodedMask { + image_path: loaded_image.path.clone(), + mask_id: def.id.clone(), + width: preview_width, + height: preview_height, + transform_hash, + }; + + // Try to get from cache first + let mut cache_lock = state.processing_cache.lock().unwrap(); + if let Some(cached_data) = cache_lock.get(&cache_key) { + if let Some(mask_image) = cached_data.as_mask() { + return Some((*mask_image).clone()); + } + } + drop(cache_lock); + + // Cache miss - generate the mask + let mask_bitmap = generate_mask_bitmap( def, preview_width, preview_height, scale_for_gpu, scaled_crop_offset, - ) + )?; + + // Store in cache + let byte_size = cache::calculate_mask_size(preview_width, preview_height); + let cached_data = cache::CachedData::Mask(Arc::new(mask_bitmap.clone()), byte_size); + let mut cache_lock = state.processing_cache.lock().unwrap(); + cache_lock.insert(cache_key, cached_data); + drop(cache_lock); + + Some(mask_bitmap) }) .collect(); @@ -661,7 +782,7 @@ fn generate_uncropped_preview( let is_raw = loaded_image.is_raw; let unique_hash = calculate_full_job_hash(&path, &adjustments_clone); let patched_image = - match composite_patches_on_image(&loaded_image.image, &adjustments_clone) { + match composite_patches_on_image(&loaded_image.image, &adjustments_clone, Some(&state), &path) { Ok(img) => img, Err(e) => { eprintln!("Failed to composite patches for uncropped preview: {}", e); @@ -756,8 +877,15 @@ fn generate_original_transformed_preview( apply_cpu_default_raw_processing(&mut image_for_preview); } - let (transformed_full_res, _unscaled_crop_offset) = - apply_all_transformations(&image_for_preview, &js_adjustments); + let transform_hash = calculate_transform_hash(&js_adjustments); + + let (transformed_full_res, _unscaled_crop_offset) = get_or_compute_transformed_image( + &state, + &loaded_image.path, + transform_hash, + "OriginalPreview", + || Ok(apply_all_transformations(&image_for_preview, &js_adjustments)), + )?; let settings = load_settings(app_handle).unwrap_or_default(); let preview_dim = settings.editor_preview_resolution.unwrap_or(1920); @@ -804,11 +932,19 @@ fn generate_fullscreen_preview( .path .clone(); let unique_hash = calculate_full_job_hash(&path, &js_adjustments); - let base_image = composite_patches_on_image(&original_image, &js_adjustments) - .map_err(|e| format!("Failed to composite AI patches for fullscreen: {}", e))?; + let transform_hash = calculate_transform_hash(&js_adjustments); - let (transformed_image, unscaled_crop_offset) = - apply_all_transformations(&base_image, &js_adjustments); + let (transformed_image, unscaled_crop_offset) = get_or_compute_transformed_image( + &state, + &path, + transform_hash, + "Fullscreen", + || 
{ + let base_image = composite_patches_on_image(&original_image, &js_adjustments, Some(&state), &path) + .map_err(|e| format!("Failed to composite AI patches for fullscreen: {}", e))?; + Ok(apply_all_transformations(&base_image, &js_adjustments)) + }, + )?; let (img_w, img_h) = transformed_image.dimensions(); let mask_definitions: Vec = js_adjustments @@ -854,8 +990,15 @@ fn process_image_for_export( state: &tauri::State, is_raw: bool, ) -> Result { - let (transformed_image, unscaled_crop_offset) = - apply_all_transformations(&base_image, &js_adjustments); + let transform_hash = calculate_transform_hash(&js_adjustments); + + let (transformed_image, unscaled_crop_offset) = get_or_compute_transformed_image( + &state, + path, + transform_hash, + "Export", + || Ok(apply_all_transformations(&base_image, &js_adjustments)), + )?; let (img_w, img_h) = transformed_image.dimensions(); let mask_definitions: Vec = js_adjustments @@ -1002,7 +1145,7 @@ async fn export_image( let task = tokio::spawn(async move { let state = app_handle.state::(); let processing_result: Result<(), String> = (|| { - let base_image = composite_patches_on_image(&original_image_data, &js_adjustments) + let base_image = composite_patches_on_image(&original_image_data, &js_adjustments, Some(&state), &original_path) .map_err(|e| format!("Failed to composite AI patches for export: {}", e))?; let final_image = process_image_for_export( @@ -1966,7 +2109,7 @@ fn calculate_dynamic_patch_radius(width: u32, height: u32) -> u32 { #[tauri::command] async fn invoke_generative_replace_with_mask_def( - _path: String, + path: String, patch_definition: AiPatchDefinition, current_adjustments: Value, use_fast_inpaint: bool, @@ -1985,7 +2128,7 @@ async fn invoke_generative_replace_with_mask_def( } let (base_image, _) = get_full_image_for_processing(&state)?; - let source_image = composite_patches_on_image(&base_image, &source_image_adjustments) + let source_image = composite_patches_on_image(&base_image, &source_image_adjustments, Some(&state), &path) .map_err(|e| format!("Failed to prepare source image: {}", e))?; let (img_w, img_h) = source_image.dimensions(); @@ -2611,6 +2754,46 @@ fn setup_logging(app_handle: &tauri::AppHandle) { ); } +#[tauri::command] +fn get_cache_stats(state: tauri::State) -> Result { + let cache_lock = state.processing_cache.lock().unwrap(); + let stats = cache_lock.stats(); + log::debug!("Cache: stats requested - {} hits, {} misses, {} entries, {:.1} MB / {:.0} MB", + stats.hits, stats.misses, stats.entry_count, + stats.current_size_bytes as f64 / (1024.0 * 1024.0), + stats.max_size_bytes as f64 / (1024.0 * 1024.0)); + Ok(stats) +} + +#[tauri::command] +fn clear_processing_cache(state: tauri::State) -> Result<(), String> { + let mut cache_lock = state.processing_cache.lock().unwrap(); + cache_lock.clear(); + log::info!("Processing cache cleared manually"); + Ok(()) +} + +#[tauri::command] +fn set_cache_max_size(state: tauri::State, size_mb: usize) -> Result<(), String> { + let size_bytes = size_mb * 1024 * 1024; + let mut cache_lock = state.processing_cache.lock().unwrap(); + cache_lock.set_max_size(size_bytes); + log::info!("Cache max size set to {} MB ({} bytes)", size_mb, size_bytes); + Ok(()) +} + +#[tauri::command] +fn apply_cache_settings(state: tauri::State, app_handle: tauri::AppHandle) -> Result<(), String> { + let settings = load_settings(app_handle).unwrap_or_default(); + let cache_size_mb = settings.cache_max_size_mb.unwrap_or(2048); + let cache_size_bytes = cache_size_mb * 1024 * 1024; + + let mut 
cache_lock = state.processing_cache.lock().unwrap(); + cache_lock.set_max_size(cache_size_bytes); + log::info!("Applied cache settings: {} MB max size", cache_size_mb); + Ok(()) +} + fn main() { tauri::Builder::default() .plugin(tauri_plugin_os::init()) @@ -2667,6 +2850,27 @@ fn main() { } } + // Initialize cache with user's saved settings + let cache_size_mb = settings.cache_max_size_mb.unwrap_or(2048); + let cache_size_bytes = cache_size_mb * 1024 * 1024; + log::info!("Initializing image processing cache with {} MB limit", cache_size_mb); + + app.manage(AppState { + original_image: Mutex::new(None), + cached_preview: Mutex::new(None), + gpu_context: Mutex::new(None), + gpu_image_cache: Mutex::new(None), + gpu_processor: Mutex::new(None), + processing_cache: Mutex::new(cache::ImageProcessingCache::new(cache_size_bytes)), + ai_state: Mutex::new(None), + ai_init_lock: TokioMutex::new(()), + export_task_handle: Mutex::new(None), + panorama_result: Arc::new(Mutex::new(None)), + indexing_task_handle: Mutex::new(None), + lut_cache: Mutex::new(HashMap::new()), + thumbnail_cancellation_token: Arc::new(AtomicBool::new(false)), + }); + let window_cfg = app.config().app.windows.get(0).unwrap().clone(); let transparent = settings.transparent.unwrap_or(window_cfg.transparent); let decorations = settings.decorations.unwrap_or(window_cfg.decorations); @@ -2685,19 +2889,6 @@ fn main() { Ok(()) }) - .manage(AppState { - original_image: Mutex::new(None), - cached_preview: Mutex::new(None), - gpu_context: Mutex::new(None), - gpu_image_cache: Mutex::new(None), - ai_state: Mutex::new(None), - ai_init_lock: TokioMutex::new(()), - export_task_handle: Mutex::new(None), - panorama_result: Arc::new(Mutex::new(None)), - indexing_task_handle: Mutex::new(None), - lut_cache: Mutex::new(HashMap::new()), - thumbnail_cancellation_token: Arc::new(AtomicBool::new(false)), - }) .invoke_handler(tauri::generate_handler![ load_image, apply_adjustments, @@ -2768,6 +2959,10 @@ fn main() { tagging::start_background_indexing, tagging::clear_all_tags, culling::cull_images, + get_cache_stats, + clear_processing_cache, + set_cache_max_size, + apply_cache_settings, ]) .run(tauri::generate_context!()) .expect("error while running tauri application"); diff --git a/src/components/panel/SettingsPanel.tsx b/src/components/panel/SettingsPanel.tsx index e64067b24..d3b7ac1aa 100644 --- a/src/components/panel/SettingsPanel.tsx +++ b/src/components/panel/SettingsPanel.tsx @@ -1,4 +1,4 @@ -import { useEffect, useState } from 'react'; +import { useEffect, useState, useRef } from 'react'; import { ArrowLeft, Check, @@ -135,6 +135,84 @@ const KeybindItem = ({ keys, description }: KeybindItemProps) => ( ); +const CacheStats = () => { + const [stats, setStats] = useState(null); + const [error, setError] = useState(null); + + const fetchStats = async () => { + try { + const cacheStats = await invoke('get_cache_stats'); + setStats(cacheStats); + setError(null); + } catch (err) { + console.error('Failed to fetch cache stats:', err); + setError(String(err)); + } + }; + + const handleClearCache = async () => { + try { + await invoke('clear_processing_cache'); + await fetchStats(); + } catch (err) { + console.error('Failed to clear cache:', err); + setError(String(err)); + } + }; + + useEffect(() => { + fetchStats(); + const interval = setInterval(fetchStats, 2000); // Update every 2 seconds + return () => clearInterval(interval); + }, []); + + if (error) { + return ( +
+ Failed to load cache stats: {error} +
+ ); + } + + if (!stats) { + return
Loading cache statistics...
; + } + + const hitRate = stats.hits + stats.misses > 0 + ? ((stats.hits / (stats.hits + stats.misses)) * 100).toFixed(1) + : '0.0'; + const utilizationPercent = ((stats.currentSizeBytes / stats.maxSizeBytes) * 100).toFixed(1); + const currentSizeMB = (stats.currentSizeBytes / (1024 * 1024)).toFixed(1); + const maxSizeMB = (stats.maxSizeBytes / (1024 * 1024)).toFixed(0); + + return ( +
+
+ Cache Efficiency: + {hitRate}% hit rate +
+
+ Memory Usage: + {currentSizeMB} / {maxSizeMB} MB ({utilizationPercent}%) +
+
+ Cached Items: + {stats.entryCount} +
+
+ Stats: + {stats.hits} hits / {stats.misses} misses / {stats.evictions} evictions +
+ +
+ ); +}; + const SettingItem = ({ children, description, label }: SettingItemProps) => (
@@ -295,8 +373,10 @@ export default function SettingsPanel({ rawHighlightCompression: appSettings?.rawHighlightCompression ?? 2.5, processingBackend: appSettings?.processingBackend || 'auto', linuxGpuOptimization: appSettings?.linuxGpuOptimization ?? false, + cacheMaxSizeMb: appSettings?.cacheMaxSizeMb || 2048, }); const [restartRequired, setRestartRequired] = useState(false); + const cacheUpdateTimeoutRef = useRef(null); useEffect(() => { if (appSettings?.comfyuiAddress !== comfyUiAddress) { @@ -311,10 +391,19 @@ export default function SettingsPanel({ rawHighlightCompression: appSettings?.rawHighlightCompression ?? 2.5, processingBackend: appSettings?.processingBackend || 'auto', linuxGpuOptimization: appSettings?.linuxGpuOptimization ?? false, + cacheMaxSizeMb: appSettings?.cacheMaxSizeMb || 2048, }); setRestartRequired(false); }, [appSettings]); + useEffect(() => { + return () => { + if (cacheUpdateTimeoutRef.current) { + clearTimeout(cacheUpdateTimeoutRef.current); + } + }; + }, []); + const handleProcessingSettingChange = (key: string, value: any) => { setProcessingSettings((prev) => ({ ...prev, [key]: value })); if (key === 'processingBackend' || key === 'linuxGpuOptimization') { @@ -664,6 +753,37 @@ export default function SettingsPanel({ /> + +
+ { + const newValue = parseInt(e.target.value); + handleProcessingSettingChange('cacheMaxSizeMb', newValue); + + // Clear existing timeout + if (cacheUpdateTimeoutRef.current) { + clearTimeout(cacheUpdateTimeoutRef.current); + } + + // Set new timeout to update backend after user stops adjusting + cacheUpdateTimeoutRef.current = setTimeout(() => { + invoke('set_cache_max_size', { sizeMb: newValue }); + }, 500); + }} + /> + +
+
+ {restartRequired && ( <>