diff --git a/.gitignore b/.gitignore index 2d5b0cf..e45fe66 100644 --- a/.gitignore +++ b/.gitignore @@ -120,4 +120,16 @@ dmypy.json *.sublime-workspace # Data folder -data/ \ No newline at end of file +data/ + +# OS generated files +core +.DS_Store +.DS_Store? +._* +.Spotlight-V100 +.Trashes +ehthumbs.db +Thumbs.db +.spyderworkspace +*~ diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 3afd869..f131e13 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -10,16 +10,9 @@ sphinx: configuration: docs/conf.py conda: - environment: docs/environment.yml + environment: environment-nocuda.yml python: install: - method: pip - path: . -# build: -# tools: -# python: "3.9" -# # You can also specify other tool versions: -# # nodejs: "16" -# # rust: "1.55" -# # golang: "1.17" \ No newline at end of file + path: . \ No newline at end of file diff --git a/Untitled.ipynb b/Untitled.ipynb new file mode 100644 index 0000000..a5f6feb --- /dev/null +++ b/Untitled.ipynb @@ -0,0 +1,57 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 3, + "id": "01ef0dd3-66df-4a9d-a98f-3041923bd1dc", + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "bfeb1035c2f3458ab7ca2885266de1a3", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Dropdown(options=(('x + 0', 0), ('x + 1', 1), ('x + 2', 2), ('x + 3', 3), ('x + 4', 4), ('x + 5', 5), ('x + 6'…" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21b32054-d63d-4a14-9f28-67bcdfc88244", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", 
+ "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.7" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/data/ALS_832_Data/fake_als_data.h5 b/data/ALS_832_Data/fake_als_data.h5 new file mode 100644 index 0000000..8b27d6e Binary files /dev/null and b/data/ALS_832_Data/fake_als_data.h5 differ diff --git a/data/ALS_832_Data/make_fake_als_data.py b/data/ALS_832_Data/make_fake_als_data.py new file mode 100644 index 0000000..0a32611 --- /dev/null +++ b/data/ALS_832_Data/make_fake_als_data.py @@ -0,0 +1,106 @@ +#!/usr/bin/env python +# coding: utf-8 + +# In[58]: + + +import numpy as np +import h5py +import os +import dxchange + + +# In[59]: + + +filename = "./fake_als_data.h5" + +numslices = 100 +numrays = 120 +numangles = 80 + +tomo = np.random.rand(numangles, numslices, numrays) +flat = np.ones((5, numslices, numrays)) # first dimension can be changed to anything +dark = np.zeros((5, numslices, numrays)) # first dimension can be changed to anything +angles = np.linspace(0, np.pi, num=numangles) + + +# In[60]: + + +pxsize = 1 # in mm +camera_distance = 100 * np.ones(len(tomo) + len(flat) + len(dark)) # in mm +energy = 10000 # in eV + + +# In[61]: + + +if os.path.exists(filename): + os.remove(filename) +with h5py.File(filename, "a") as f: + det = f.create_group("measurement/instrument/detector") + det.create_dataset("dimension_y", data=np.asarray(numslices)[np.newaxis]) + det.create_dataset("dimension_x", data=np.asarray(numrays)[np.newaxis]) + det.create_dataset("pixel_size", data=np.asarray(pxsize)[np.newaxis]) + rot = f.create_group("process/acquisition/rotation") + rot.create_dataset("num_angles", data=np.asarray(len(angles))[np.newaxis]) + rot.create_dataset( + "range", + data=np.asarray((180 / np.pi) * np.abs(angles[-1] - angles[0]))[np.newaxis], + ) + f.create_dataset( + "measurement/instrument/camera_motor_stack/setup/camera_distance", + data=camera_distance, + ) + f.create_dataset( + 
"measurement/instrument/monochromator/energy", + data=np.asarray(energy)[np.newaxis], + ) + exch = f.create_group("exchange") + exch.create_dataset("data", data=tomo) + exch.create_dataset("data_white", data=flat) + exch.create_dataset("data_dark", data=dark) + exch.create_dataset("theta", data=(180 / np.pi) * angles) + + +# In[62]: + + +numslices = int( + dxchange.read_hdf5(filename, "/measurement/instrument/detector/dimension_y")[0] +) +numrays = int( + dxchange.read_hdf5(filename, "/measurement/instrument/detector/dimension_x")[0] +) +pxsize = ( + dxchange.read_hdf5(filename, "/measurement/instrument/detector/pixel_size")[0] + / 10.0 +) # /10 to convert units from mm to cm +numangles = int( + dxchange.read_hdf5(filename, "/process/acquisition/rotation/num_angles")[0] +) +propagation_dist = dxchange.read_hdf5( + filename, "/measurement/instrument/camera_motor_stack/setup/camera_distance" +)[1] +kev = ( + dxchange.read_hdf5(filename, "/measurement/instrument/monochromator/energy")[0] + / 1000 +) +angularrange = dxchange.read_hdf5(filename, "/process/acquisition/rotation/range")[0] + + +# In[63]: + + +tomo, flat, dark, angles = dxchange.exchange.read_aps_tomoscan_hdf5(filename) +print(tomo.shape, tomo.dtype, tomo.min(), tomo.max()) +print(flat.shape, flat.dtype, flat.min(), flat.max()) +print(dark.shape, dark.dtype, dark.min(), dark.max()) +print(angles.shape, angles.dtype, angles.min(), angles.max()) + + +# In[ ]: + + +# In[ ]: diff --git a/data/tomo77_downsampled0p5.tif b/data/tiff_image/tomo77_downsampled0p5.tif similarity index 100% rename from data/tomo77_downsampled0p5.tif rename to data/tiff_image/tomo77_downsampled0p5.tif diff --git a/data/tiff_sequence/tomo77_000.tiff b/data/tiff_sequence/tomo77_000.tiff new file mode 100644 index 0000000..b086656 Binary files /dev/null and b/data/tiff_sequence/tomo77_000.tiff differ diff --git a/data/tiff_sequence/tomo77_001.tiff b/data/tiff_sequence/tomo77_001.tiff new file mode 100644 index 0000000..40bb6bb Binary files 
/dev/null and b/data/tiff_sequence/tomo77_001.tiff differ diff --git a/data/tiff_sequence/tomo77_002.tiff b/data/tiff_sequence/tomo77_002.tiff new file mode 100644 index 0000000..38eee15 Binary files /dev/null and b/data/tiff_sequence/tomo77_002.tiff differ diff --git a/data/tiff_sequence/tomo77_003.tiff b/data/tiff_sequence/tomo77_003.tiff new file mode 100644 index 0000000..f461187 Binary files /dev/null and b/data/tiff_sequence/tomo77_003.tiff differ diff --git a/data/tiff_sequence/tomo77_004.tiff b/data/tiff_sequence/tomo77_004.tiff new file mode 100644 index 0000000..f3948aa Binary files /dev/null and b/data/tiff_sequence/tomo77_004.tiff differ diff --git a/data/tiff_sequence/tomo77_005.tiff b/data/tiff_sequence/tomo77_005.tiff new file mode 100644 index 0000000..fc90b86 Binary files /dev/null and b/data/tiff_sequence/tomo77_005.tiff differ diff --git a/data/tiff_sequence/tomo77_006.tiff b/data/tiff_sequence/tomo77_006.tiff new file mode 100644 index 0000000..82f3f30 Binary files /dev/null and b/data/tiff_sequence/tomo77_006.tiff differ diff --git a/data/tiff_sequence/tomo77_007.tiff b/data/tiff_sequence/tomo77_007.tiff new file mode 100644 index 0000000..6240a25 Binary files /dev/null and b/data/tiff_sequence/tomo77_007.tiff differ diff --git a/data/tiff_sequence/tomo77_008.tiff b/data/tiff_sequence/tomo77_008.tiff new file mode 100644 index 0000000..9d07842 Binary files /dev/null and b/data/tiff_sequence/tomo77_008.tiff differ diff --git a/data/tiff_sequence/tomo77_009.tiff b/data/tiff_sequence/tomo77_009.tiff new file mode 100644 index 0000000..265b718 Binary files /dev/null and b/data/tiff_sequence/tomo77_009.tiff differ diff --git a/data/tiff_sequence/tomo77_010.tiff b/data/tiff_sequence/tomo77_010.tiff new file mode 100644 index 0000000..29f5e52 Binary files /dev/null and b/data/tiff_sequence/tomo77_010.tiff differ diff --git a/data/tiff_sequence/tomo77_011.tiff b/data/tiff_sequence/tomo77_011.tiff new file mode 100644 index 0000000..4cd87be Binary files 
/dev/null and b/data/tiff_sequence/tomo77_011.tiff differ diff --git a/data/tiff_sequence/tomo77_012.tiff b/data/tiff_sequence/tomo77_012.tiff new file mode 100644 index 0000000..bf28e31 Binary files /dev/null and b/data/tiff_sequence/tomo77_012.tiff differ diff --git a/data/tiff_sequence/tomo77_013.tiff b/data/tiff_sequence/tomo77_013.tiff new file mode 100644 index 0000000..7ea3df3 Binary files /dev/null and b/data/tiff_sequence/tomo77_013.tiff differ diff --git a/data/tiff_sequence/tomo77_014.tiff b/data/tiff_sequence/tomo77_014.tiff new file mode 100644 index 0000000..1df70fd Binary files /dev/null and b/data/tiff_sequence/tomo77_014.tiff differ diff --git a/data/tiff_sequence/tomo77_015.tiff b/data/tiff_sequence/tomo77_015.tiff new file mode 100644 index 0000000..e6d0cc6 Binary files /dev/null and b/data/tiff_sequence/tomo77_015.tiff differ diff --git a/data/tiff_sequence/tomo77_016.tiff b/data/tiff_sequence/tomo77_016.tiff new file mode 100644 index 0000000..dbfe456 Binary files /dev/null and b/data/tiff_sequence/tomo77_016.tiff differ diff --git a/data/tiff_sequence/tomo77_017.tiff b/data/tiff_sequence/tomo77_017.tiff new file mode 100644 index 0000000..fdf152e Binary files /dev/null and b/data/tiff_sequence/tomo77_017.tiff differ diff --git a/data/tiff_sequence/tomo77_018.tiff b/data/tiff_sequence/tomo77_018.tiff new file mode 100644 index 0000000..806c51e Binary files /dev/null and b/data/tiff_sequence/tomo77_018.tiff differ diff --git a/data/tiff_sequence/tomo77_019.tiff b/data/tiff_sequence/tomo77_019.tiff new file mode 100644 index 0000000..62f728a Binary files /dev/null and b/data/tiff_sequence/tomo77_019.tiff differ diff --git a/data/tiff_sequence/tomo77_020.tiff b/data/tiff_sequence/tomo77_020.tiff new file mode 100644 index 0000000..4fad71e Binary files /dev/null and b/data/tiff_sequence/tomo77_020.tiff differ diff --git a/data/tiff_sequence/tomo77_021.tiff b/data/tiff_sequence/tomo77_021.tiff new file mode 100644 index 0000000..d9b89e5 Binary files 
/dev/null and b/data/tiff_sequence/tomo77_021.tiff differ diff --git a/data/tiff_sequence/tomo77_022.tiff b/data/tiff_sequence/tomo77_022.tiff new file mode 100644 index 0000000..80c4508 Binary files /dev/null and b/data/tiff_sequence/tomo77_022.tiff differ diff --git a/data/tiff_sequence/tomo77_023.tiff b/data/tiff_sequence/tomo77_023.tiff new file mode 100644 index 0000000..9d9df9f Binary files /dev/null and b/data/tiff_sequence/tomo77_023.tiff differ diff --git a/data/tiff_sequence/tomo77_024.tiff b/data/tiff_sequence/tomo77_024.tiff new file mode 100644 index 0000000..b1168a3 Binary files /dev/null and b/data/tiff_sequence/tomo77_024.tiff differ diff --git a/data/tiff_sequence/tomo77_025.tiff b/data/tiff_sequence/tomo77_025.tiff new file mode 100644 index 0000000..fd11401 Binary files /dev/null and b/data/tiff_sequence/tomo77_025.tiff differ diff --git a/data/tiff_sequence/tomo77_026.tiff b/data/tiff_sequence/tomo77_026.tiff new file mode 100644 index 0000000..978d803 Binary files /dev/null and b/data/tiff_sequence/tomo77_026.tiff differ diff --git a/data/tiff_sequence/tomo77_027.tiff b/data/tiff_sequence/tomo77_027.tiff new file mode 100644 index 0000000..2364a36 Binary files /dev/null and b/data/tiff_sequence/tomo77_027.tiff differ diff --git a/data/tiff_sequence/tomo77_028.tiff b/data/tiff_sequence/tomo77_028.tiff new file mode 100644 index 0000000..be99f8d Binary files /dev/null and b/data/tiff_sequence/tomo77_028.tiff differ diff --git a/data/tiff_sequence/tomo77_029.tiff b/data/tiff_sequence/tomo77_029.tiff new file mode 100644 index 0000000..b0d333d Binary files /dev/null and b/data/tiff_sequence/tomo77_029.tiff differ diff --git a/data/tiff_sequence/tomo77_030.tiff b/data/tiff_sequence/tomo77_030.tiff new file mode 100644 index 0000000..7d3394c Binary files /dev/null and b/data/tiff_sequence/tomo77_030.tiff differ diff --git a/data/tiff_sequence/tomo77_031.tiff b/data/tiff_sequence/tomo77_031.tiff new file mode 100644 index 0000000..5479ea7 Binary files 
/dev/null and b/data/tiff_sequence/tomo77_031.tiff differ diff --git a/data/tiff_sequence/tomo77_032.tiff b/data/tiff_sequence/tomo77_032.tiff new file mode 100644 index 0000000..e087070 Binary files /dev/null and b/data/tiff_sequence/tomo77_032.tiff differ diff --git a/data/tiff_sequence/tomo77_033.tiff b/data/tiff_sequence/tomo77_033.tiff new file mode 100644 index 0000000..71415e7 Binary files /dev/null and b/data/tiff_sequence/tomo77_033.tiff differ diff --git a/data/tiff_sequence/tomo77_034.tiff b/data/tiff_sequence/tomo77_034.tiff new file mode 100644 index 0000000..dc992fe Binary files /dev/null and b/data/tiff_sequence/tomo77_034.tiff differ diff --git a/data/tiff_sequence/tomo77_035.tiff b/data/tiff_sequence/tomo77_035.tiff new file mode 100644 index 0000000..d99a88b Binary files /dev/null and b/data/tiff_sequence/tomo77_035.tiff differ diff --git a/data/tiff_sequence/tomo77_036.tiff b/data/tiff_sequence/tomo77_036.tiff new file mode 100644 index 0000000..821ccd6 Binary files /dev/null and b/data/tiff_sequence/tomo77_036.tiff differ diff --git a/data/tiff_sequence/tomo77_037.tiff b/data/tiff_sequence/tomo77_037.tiff new file mode 100644 index 0000000..88832bb Binary files /dev/null and b/data/tiff_sequence/tomo77_037.tiff differ diff --git a/data/tiff_sequence/tomo77_038.tiff b/data/tiff_sequence/tomo77_038.tiff new file mode 100644 index 0000000..78764b8 Binary files /dev/null and b/data/tiff_sequence/tomo77_038.tiff differ diff --git a/data/tiff_sequence/tomo77_039.tiff b/data/tiff_sequence/tomo77_039.tiff new file mode 100644 index 0000000..9500a83 Binary files /dev/null and b/data/tiff_sequence/tomo77_039.tiff differ diff --git a/data/tiff_sequence/tomo77_040.tiff b/data/tiff_sequence/tomo77_040.tiff new file mode 100644 index 0000000..58ca087 Binary files /dev/null and b/data/tiff_sequence/tomo77_040.tiff differ diff --git a/data/tiff_sequence/tomo77_041.tiff b/data/tiff_sequence/tomo77_041.tiff new file mode 100644 index 0000000..72147bc Binary files 
/dev/null and b/data/tiff_sequence/tomo77_041.tiff differ diff --git a/data/tiff_sequence/tomo77_042.tiff b/data/tiff_sequence/tomo77_042.tiff new file mode 100644 index 0000000..9ecb232 Binary files /dev/null and b/data/tiff_sequence/tomo77_042.tiff differ diff --git a/data/tiff_sequence/tomo77_043.tiff b/data/tiff_sequence/tomo77_043.tiff new file mode 100644 index 0000000..fafc55b Binary files /dev/null and b/data/tiff_sequence/tomo77_043.tiff differ diff --git a/data/tiff_sequence/tomo77_044.tiff b/data/tiff_sequence/tomo77_044.tiff new file mode 100644 index 0000000..142eb66 Binary files /dev/null and b/data/tiff_sequence/tomo77_044.tiff differ diff --git a/data/tiff_sequence/tomo77_045.tiff b/data/tiff_sequence/tomo77_045.tiff new file mode 100644 index 0000000..37201ae Binary files /dev/null and b/data/tiff_sequence/tomo77_045.tiff differ diff --git a/data/tiff_sequence/tomo77_046.tiff b/data/tiff_sequence/tomo77_046.tiff new file mode 100644 index 0000000..f6049a5 Binary files /dev/null and b/data/tiff_sequence/tomo77_046.tiff differ diff --git a/data/tiff_sequence/tomo77_047.tiff b/data/tiff_sequence/tomo77_047.tiff new file mode 100644 index 0000000..98955e9 Binary files /dev/null and b/data/tiff_sequence/tomo77_047.tiff differ diff --git a/data/tiff_sequence/tomo77_048.tiff b/data/tiff_sequence/tomo77_048.tiff new file mode 100644 index 0000000..3742cc6 Binary files /dev/null and b/data/tiff_sequence/tomo77_048.tiff differ diff --git a/data/tiff_sequence/tomo77_049.tiff b/data/tiff_sequence/tomo77_049.tiff new file mode 100644 index 0000000..4db259b Binary files /dev/null and b/data/tiff_sequence/tomo77_049.tiff differ diff --git a/data/tiff_sequence/tomo77_050.tiff b/data/tiff_sequence/tomo77_050.tiff new file mode 100644 index 0000000..69aa34f Binary files /dev/null and b/data/tiff_sequence/tomo77_050.tiff differ diff --git a/data/tiff_sequence/tomo77_051.tiff b/data/tiff_sequence/tomo77_051.tiff new file mode 100644 index 0000000..6e59058 Binary files 
/dev/null and b/data/tiff_sequence/tomo77_051.tiff differ diff --git a/data/tiff_sequence/tomo77_052.tiff b/data/tiff_sequence/tomo77_052.tiff new file mode 100644 index 0000000..4e616f5 Binary files /dev/null and b/data/tiff_sequence/tomo77_052.tiff differ diff --git a/data/tiff_sequence/tomo77_053.tiff b/data/tiff_sequence/tomo77_053.tiff new file mode 100644 index 0000000..ee42cad Binary files /dev/null and b/data/tiff_sequence/tomo77_053.tiff differ diff --git a/data/tiff_sequence/tomo77_054.tiff b/data/tiff_sequence/tomo77_054.tiff new file mode 100644 index 0000000..b758ce6 Binary files /dev/null and b/data/tiff_sequence/tomo77_054.tiff differ diff --git a/data/tiff_sequence/tomo77_055.tiff b/data/tiff_sequence/tomo77_055.tiff new file mode 100644 index 0000000..a198db3 Binary files /dev/null and b/data/tiff_sequence/tomo77_055.tiff differ diff --git a/data/tiff_sequence/tomo77_056.tiff b/data/tiff_sequence/tomo77_056.tiff new file mode 100644 index 0000000..6c5307a Binary files /dev/null and b/data/tiff_sequence/tomo77_056.tiff differ diff --git a/data/tiff_sequence/tomo77_057.tiff b/data/tiff_sequence/tomo77_057.tiff new file mode 100644 index 0000000..4b4f135 Binary files /dev/null and b/data/tiff_sequence/tomo77_057.tiff differ diff --git a/data/tiff_sequence/tomo77_058.tiff b/data/tiff_sequence/tomo77_058.tiff new file mode 100644 index 0000000..b6b7db5 Binary files /dev/null and b/data/tiff_sequence/tomo77_058.tiff differ diff --git a/data/tiff_sequence/tomo77_059.tiff b/data/tiff_sequence/tomo77_059.tiff new file mode 100644 index 0000000..883e1ff Binary files /dev/null and b/data/tiff_sequence/tomo77_059.tiff differ diff --git a/data/tiff_sequence/tomo77_060.tiff b/data/tiff_sequence/tomo77_060.tiff new file mode 100644 index 0000000..1ddb89f Binary files /dev/null and b/data/tiff_sequence/tomo77_060.tiff differ diff --git a/data/tiff_sequence/tomo77_061.tiff b/data/tiff_sequence/tomo77_061.tiff new file mode 100644 index 0000000..b110541 Binary files 
/dev/null and b/data/tiff_sequence/tomo77_061.tiff differ diff --git a/data/tiff_sequence/tomo77_062.tiff b/data/tiff_sequence/tomo77_062.tiff new file mode 100644 index 0000000..5a30149 Binary files /dev/null and b/data/tiff_sequence/tomo77_062.tiff differ diff --git a/data/tiff_sequence/tomo77_063.tiff b/data/tiff_sequence/tomo77_063.tiff new file mode 100644 index 0000000..332809c Binary files /dev/null and b/data/tiff_sequence/tomo77_063.tiff differ diff --git a/data/tiff_sequence/tomo77_064.tiff b/data/tiff_sequence/tomo77_064.tiff new file mode 100644 index 0000000..c2bd610 Binary files /dev/null and b/data/tiff_sequence/tomo77_064.tiff differ diff --git a/data/tiff_sequence/tomo77_065.tiff b/data/tiff_sequence/tomo77_065.tiff new file mode 100644 index 0000000..bebf193 Binary files /dev/null and b/data/tiff_sequence/tomo77_065.tiff differ diff --git a/data/tiff_sequence/tomo77_066.tiff b/data/tiff_sequence/tomo77_066.tiff new file mode 100644 index 0000000..34d279e Binary files /dev/null and b/data/tiff_sequence/tomo77_066.tiff differ diff --git a/data/tiff_sequence/tomo77_067.tiff b/data/tiff_sequence/tomo77_067.tiff new file mode 100644 index 0000000..eb82c88 Binary files /dev/null and b/data/tiff_sequence/tomo77_067.tiff differ diff --git a/data/tiff_sequence/tomo77_068.tiff b/data/tiff_sequence/tomo77_068.tiff new file mode 100644 index 0000000..79abd22 Binary files /dev/null and b/data/tiff_sequence/tomo77_068.tiff differ diff --git a/data/tiff_sequence/tomo77_069.tiff b/data/tiff_sequence/tomo77_069.tiff new file mode 100644 index 0000000..1818b43 Binary files /dev/null and b/data/tiff_sequence/tomo77_069.tiff differ diff --git a/data/tiff_sequence/tomo77_070.tiff b/data/tiff_sequence/tomo77_070.tiff new file mode 100644 index 0000000..19ce8c9 Binary files /dev/null and b/data/tiff_sequence/tomo77_070.tiff differ diff --git a/data/tiff_sequence/tomo77_071.tiff b/data/tiff_sequence/tomo77_071.tiff new file mode 100644 index 0000000..a9ed2c3 Binary files 
/dev/null and b/data/tiff_sequence/tomo77_071.tiff differ diff --git a/data/tiff_sequence/tomo77_072.tiff b/data/tiff_sequence/tomo77_072.tiff new file mode 100644 index 0000000..6311899 Binary files /dev/null and b/data/tiff_sequence/tomo77_072.tiff differ diff --git a/data/tiff_sequence/tomo77_073.tiff b/data/tiff_sequence/tomo77_073.tiff new file mode 100644 index 0000000..2a56841 Binary files /dev/null and b/data/tiff_sequence/tomo77_073.tiff differ diff --git a/data/tiff_sequence/tomo77_074.tiff b/data/tiff_sequence/tomo77_074.tiff new file mode 100644 index 0000000..f1fea74 Binary files /dev/null and b/data/tiff_sequence/tomo77_074.tiff differ diff --git a/data/tiff_sequence/tomo77_075.tiff b/data/tiff_sequence/tomo77_075.tiff new file mode 100644 index 0000000..023fb1c Binary files /dev/null and b/data/tiff_sequence/tomo77_075.tiff differ diff --git a/data/tiff_sequence/tomo77_076.tiff b/data/tiff_sequence/tomo77_076.tiff new file mode 100644 index 0000000..0437f43 Binary files /dev/null and b/data/tiff_sequence/tomo77_076.tiff differ diff --git a/data/tiff_sequence/tomo77_077.tiff b/data/tiff_sequence/tomo77_077.tiff new file mode 100644 index 0000000..3a1a29c Binary files /dev/null and b/data/tiff_sequence/tomo77_077.tiff differ diff --git a/data/tiff_sequence/tomo77_078.tiff b/data/tiff_sequence/tomo77_078.tiff new file mode 100644 index 0000000..77dbe62 Binary files /dev/null and b/data/tiff_sequence/tomo77_078.tiff differ diff --git a/data/tiff_sequence/tomo77_079.tiff b/data/tiff_sequence/tomo77_079.tiff new file mode 100644 index 0000000..68ae20c Binary files /dev/null and b/data/tiff_sequence/tomo77_079.tiff differ diff --git a/data/tiff_sequence/tomo77_080.tiff b/data/tiff_sequence/tomo77_080.tiff new file mode 100644 index 0000000..6616321 Binary files /dev/null and b/data/tiff_sequence/tomo77_080.tiff differ diff --git a/data/tiff_sequence/tomo77_081.tiff b/data/tiff_sequence/tomo77_081.tiff new file mode 100644 index 0000000..6b26083 Binary files 
/dev/null and b/data/tiff_sequence/tomo77_081.tiff differ diff --git a/data/tiff_sequence/tomo77_082.tiff b/data/tiff_sequence/tomo77_082.tiff new file mode 100644 index 0000000..c533c94 Binary files /dev/null and b/data/tiff_sequence/tomo77_082.tiff differ diff --git a/data/tiff_sequence/tomo77_083.tiff b/data/tiff_sequence/tomo77_083.tiff new file mode 100644 index 0000000..3ac4502 Binary files /dev/null and b/data/tiff_sequence/tomo77_083.tiff differ diff --git a/data/tiff_sequence/tomo77_084.tiff b/data/tiff_sequence/tomo77_084.tiff new file mode 100644 index 0000000..3a2e4df Binary files /dev/null and b/data/tiff_sequence/tomo77_084.tiff differ diff --git a/data/tiff_sequence/tomo77_085.tiff b/data/tiff_sequence/tomo77_085.tiff new file mode 100644 index 0000000..8223c98 Binary files /dev/null and b/data/tiff_sequence/tomo77_085.tiff differ diff --git a/data/tiff_sequence/tomo77_086.tiff b/data/tiff_sequence/tomo77_086.tiff new file mode 100644 index 0000000..50b4aeb Binary files /dev/null and b/data/tiff_sequence/tomo77_086.tiff differ diff --git a/data/tiff_sequence/tomo77_087.tiff b/data/tiff_sequence/tomo77_087.tiff new file mode 100644 index 0000000..ad9273f Binary files /dev/null and b/data/tiff_sequence/tomo77_087.tiff differ diff --git a/data/tiff_sequence/tomo77_088.tiff b/data/tiff_sequence/tomo77_088.tiff new file mode 100644 index 0000000..37b60b7 Binary files /dev/null and b/data/tiff_sequence/tomo77_088.tiff differ diff --git a/data/tiff_sequence/tomo77_089.tiff b/data/tiff_sequence/tomo77_089.tiff new file mode 100644 index 0000000..30a33f0 Binary files /dev/null and b/data/tiff_sequence/tomo77_089.tiff differ diff --git a/data/tiff_sequence/tomo77_090.tiff b/data/tiff_sequence/tomo77_090.tiff new file mode 100644 index 0000000..8b5fec6 Binary files /dev/null and b/data/tiff_sequence/tomo77_090.tiff differ diff --git a/data/tiff_sequence/tomo77_091.tiff b/data/tiff_sequence/tomo77_091.tiff new file mode 100644 index 0000000..d287cca Binary files 
/dev/null and b/data/tiff_sequence/tomo77_091.tiff differ diff --git a/data/tiff_sequence/tomo77_092.tiff b/data/tiff_sequence/tomo77_092.tiff new file mode 100644 index 0000000..b230f0b Binary files /dev/null and b/data/tiff_sequence/tomo77_092.tiff differ diff --git a/data/tiff_sequence/tomo77_093.tiff b/data/tiff_sequence/tomo77_093.tiff new file mode 100644 index 0000000..948fd59 Binary files /dev/null and b/data/tiff_sequence/tomo77_093.tiff differ diff --git a/data/tiff_sequence/tomo77_094.tiff b/data/tiff_sequence/tomo77_094.tiff new file mode 100644 index 0000000..ace4773 Binary files /dev/null and b/data/tiff_sequence/tomo77_094.tiff differ diff --git a/data/tiff_sequence/tomo77_095.tiff b/data/tiff_sequence/tomo77_095.tiff new file mode 100644 index 0000000..2704975 Binary files /dev/null and b/data/tiff_sequence/tomo77_095.tiff differ diff --git a/data/tiff_sequence/tomo77_096.tiff b/data/tiff_sequence/tomo77_096.tiff new file mode 100644 index 0000000..1b5b185 Binary files /dev/null and b/data/tiff_sequence/tomo77_096.tiff differ diff --git a/data/tiff_sequence/tomo77_097.tiff b/data/tiff_sequence/tomo77_097.tiff new file mode 100644 index 0000000..ddb6ccd Binary files /dev/null and b/data/tiff_sequence/tomo77_097.tiff differ diff --git a/data/tiff_sequence/tomo77_098.tiff b/data/tiff_sequence/tomo77_098.tiff new file mode 100644 index 0000000..168d968 Binary files /dev/null and b/data/tiff_sequence/tomo77_098.tiff differ diff --git a/data/tiff_sequence/tomo77_099.tiff b/data/tiff_sequence/tomo77_099.tiff new file mode 100644 index 0000000..3ec5fe5 Binary files /dev/null and b/data/tiff_sequence/tomo77_099.tiff differ diff --git a/data/tiff_sequence/tomo77_100.tiff b/data/tiff_sequence/tomo77_100.tiff new file mode 100644 index 0000000..ab1bc1a Binary files /dev/null and b/data/tiff_sequence/tomo77_100.tiff differ diff --git a/data/tiff_sequence/tomo77_101.tiff b/data/tiff_sequence/tomo77_101.tiff new file mode 100644 index 0000000..3521377 Binary files 
/dev/null and b/data/tiff_sequence/tomo77_101.tiff differ diff --git a/data/tiff_sequence/tomo77_102.tiff b/data/tiff_sequence/tomo77_102.tiff new file mode 100644 index 0000000..c803a1a Binary files /dev/null and b/data/tiff_sequence/tomo77_102.tiff differ diff --git a/data/tiff_sequence/tomo77_103.tiff b/data/tiff_sequence/tomo77_103.tiff new file mode 100644 index 0000000..6184c48 Binary files /dev/null and b/data/tiff_sequence/tomo77_103.tiff differ diff --git a/data/tiff_sequence/tomo77_104.tiff b/data/tiff_sequence/tomo77_104.tiff new file mode 100644 index 0000000..1efbe75 Binary files /dev/null and b/data/tiff_sequence/tomo77_104.tiff differ diff --git a/data/tiff_sequence/tomo77_105.tiff b/data/tiff_sequence/tomo77_105.tiff new file mode 100644 index 0000000..e048733 Binary files /dev/null and b/data/tiff_sequence/tomo77_105.tiff differ diff --git a/data/tiff_sequence/tomo77_106.tiff b/data/tiff_sequence/tomo77_106.tiff new file mode 100644 index 0000000..6502c88 Binary files /dev/null and b/data/tiff_sequence/tomo77_106.tiff differ diff --git a/data/tiff_sequence/tomo77_107.tiff b/data/tiff_sequence/tomo77_107.tiff new file mode 100644 index 0000000..705343d Binary files /dev/null and b/data/tiff_sequence/tomo77_107.tiff differ diff --git a/data/tiff_sequence/tomo77_108.tiff b/data/tiff_sequence/tomo77_108.tiff new file mode 100644 index 0000000..0318662 Binary files /dev/null and b/data/tiff_sequence/tomo77_108.tiff differ diff --git a/data/tiff_sequence/tomo77_109.tiff b/data/tiff_sequence/tomo77_109.tiff new file mode 100644 index 0000000..4e93499 Binary files /dev/null and b/data/tiff_sequence/tomo77_109.tiff differ diff --git a/data/tiff_sequence/tomo77_110.tiff b/data/tiff_sequence/tomo77_110.tiff new file mode 100644 index 0000000..2937ae8 Binary files /dev/null and b/data/tiff_sequence/tomo77_110.tiff differ diff --git a/data/tiff_sequence/tomo77_111.tiff b/data/tiff_sequence/tomo77_111.tiff new file mode 100644 index 0000000..ebb304f Binary files 
/dev/null and b/data/tiff_sequence/tomo77_111.tiff differ diff --git a/data/tiff_sequence/tomo77_112.tiff b/data/tiff_sequence/tomo77_112.tiff new file mode 100644 index 0000000..5e23176 Binary files /dev/null and b/data/tiff_sequence/tomo77_112.tiff differ diff --git a/data/tiff_sequence/tomo77_113.tiff b/data/tiff_sequence/tomo77_113.tiff new file mode 100644 index 0000000..c9015e7 Binary files /dev/null and b/data/tiff_sequence/tomo77_113.tiff differ diff --git a/data/tiff_sequence/tomo77_114.tiff b/data/tiff_sequence/tomo77_114.tiff new file mode 100644 index 0000000..4d9a92d Binary files /dev/null and b/data/tiff_sequence/tomo77_114.tiff differ diff --git a/data/tiff_sequence/tomo77_115.tiff b/data/tiff_sequence/tomo77_115.tiff new file mode 100644 index 0000000..ef6861a Binary files /dev/null and b/data/tiff_sequence/tomo77_115.tiff differ diff --git a/data/tiff_sequence/tomo77_116.tiff b/data/tiff_sequence/tomo77_116.tiff new file mode 100644 index 0000000..a79906a Binary files /dev/null and b/data/tiff_sequence/tomo77_116.tiff differ diff --git a/data/tiff_sequence/tomo77_117.tiff b/data/tiff_sequence/tomo77_117.tiff new file mode 100644 index 0000000..18dbd67 Binary files /dev/null and b/data/tiff_sequence/tomo77_117.tiff differ diff --git a/data/tiff_sequence/tomo77_118.tiff b/data/tiff_sequence/tomo77_118.tiff new file mode 100644 index 0000000..6972240 Binary files /dev/null and b/data/tiff_sequence/tomo77_118.tiff differ diff --git a/data/tiff_sequence/tomo77_119.tiff b/data/tiff_sequence/tomo77_119.tiff new file mode 100644 index 0000000..1996bc2 Binary files /dev/null and b/data/tiff_sequence/tomo77_119.tiff differ diff --git a/data/tiff_sequence/tomo77_120.tiff b/data/tiff_sequence/tomo77_120.tiff new file mode 100644 index 0000000..195b99f Binary files /dev/null and b/data/tiff_sequence/tomo77_120.tiff differ diff --git a/data/tiff_sequence/tomo77_121.tiff b/data/tiff_sequence/tomo77_121.tiff new file mode 100644 index 0000000..68b413c Binary files 
/dev/null and b/data/tiff_sequence/tomo77_121.tiff differ diff --git a/data/tiff_sequence/tomo77_122.tiff b/data/tiff_sequence/tomo77_122.tiff new file mode 100644 index 0000000..21b8eff Binary files /dev/null and b/data/tiff_sequence/tomo77_122.tiff differ diff --git a/data/tiff_sequence/tomo77_123.tiff b/data/tiff_sequence/tomo77_123.tiff new file mode 100644 index 0000000..673d78c Binary files /dev/null and b/data/tiff_sequence/tomo77_123.tiff differ diff --git a/data/tiff_sequence/tomo77_124.tiff b/data/tiff_sequence/tomo77_124.tiff new file mode 100644 index 0000000..ab7fab9 Binary files /dev/null and b/data/tiff_sequence/tomo77_124.tiff differ diff --git a/data/tiff_sequence/tomo77_125.tiff b/data/tiff_sequence/tomo77_125.tiff new file mode 100644 index 0000000..47e415f Binary files /dev/null and b/data/tiff_sequence/tomo77_125.tiff differ diff --git a/data/tiff_sequence/tomo77_126.tiff b/data/tiff_sequence/tomo77_126.tiff new file mode 100644 index 0000000..35ca50e Binary files /dev/null and b/data/tiff_sequence/tomo77_126.tiff differ diff --git a/data/tiff_sequence/tomo77_127.tiff b/data/tiff_sequence/tomo77_127.tiff new file mode 100644 index 0000000..8bec67c Binary files /dev/null and b/data/tiff_sequence/tomo77_127.tiff differ diff --git a/data/tiff_sequence/tomo77_128.tiff b/data/tiff_sequence/tomo77_128.tiff new file mode 100644 index 0000000..4e97fe2 Binary files /dev/null and b/data/tiff_sequence/tomo77_128.tiff differ diff --git a/data/tiff_sequence/tomo77_129.tiff b/data/tiff_sequence/tomo77_129.tiff new file mode 100644 index 0000000..e101b01 Binary files /dev/null and b/data/tiff_sequence/tomo77_129.tiff differ diff --git a/data/tiff_sequence/tomo77_130.tiff b/data/tiff_sequence/tomo77_130.tiff new file mode 100644 index 0000000..4b0a9ff Binary files /dev/null and b/data/tiff_sequence/tomo77_130.tiff differ diff --git a/data/tiff_sequence/tomo77_131.tiff b/data/tiff_sequence/tomo77_131.tiff new file mode 100644 index 0000000..ea639f1 Binary files 
/dev/null and b/data/tiff_sequence/tomo77_131.tiff differ diff --git a/data/tiff_sequence/tomo77_132.tiff b/data/tiff_sequence/tomo77_132.tiff new file mode 100644 index 0000000..ea1be46 Binary files /dev/null and b/data/tiff_sequence/tomo77_132.tiff differ diff --git a/data/tiff_sequence/tomo77_133.tiff b/data/tiff_sequence/tomo77_133.tiff new file mode 100644 index 0000000..30e7353 Binary files /dev/null and b/data/tiff_sequence/tomo77_133.tiff differ diff --git a/data/tiff_sequence/tomo77_134.tiff b/data/tiff_sequence/tomo77_134.tiff new file mode 100644 index 0000000..610c0c1 Binary files /dev/null and b/data/tiff_sequence/tomo77_134.tiff differ diff --git a/data/tiff_sequence/tomo77_135.tiff b/data/tiff_sequence/tomo77_135.tiff new file mode 100644 index 0000000..01d5cf3 Binary files /dev/null and b/data/tiff_sequence/tomo77_135.tiff differ diff --git a/data/tiff_sequence/tomo77_136.tiff b/data/tiff_sequence/tomo77_136.tiff new file mode 100644 index 0000000..5eaebec Binary files /dev/null and b/data/tiff_sequence/tomo77_136.tiff differ diff --git a/data/tiff_sequence/tomo77_137.tiff b/data/tiff_sequence/tomo77_137.tiff new file mode 100644 index 0000000..d3a5e2e Binary files /dev/null and b/data/tiff_sequence/tomo77_137.tiff differ diff --git a/data/tiff_sequence/tomo77_138.tiff b/data/tiff_sequence/tomo77_138.tiff new file mode 100644 index 0000000..5b10f58 Binary files /dev/null and b/data/tiff_sequence/tomo77_138.tiff differ diff --git a/data/tiff_sequence/tomo77_139.tiff b/data/tiff_sequence/tomo77_139.tiff new file mode 100644 index 0000000..dc23373 Binary files /dev/null and b/data/tiff_sequence/tomo77_139.tiff differ diff --git a/data/tiff_sequence/tomo77_140.tiff b/data/tiff_sequence/tomo77_140.tiff new file mode 100644 index 0000000..398a20b Binary files /dev/null and b/data/tiff_sequence/tomo77_140.tiff differ diff --git a/data/tiff_sequence/tomo77_141.tiff b/data/tiff_sequence/tomo77_141.tiff new file mode 100644 index 0000000..e2f6658 Binary files 
/dev/null and b/data/tiff_sequence/tomo77_141.tiff differ diff --git a/data/tiff_sequence/tomo77_142.tiff b/data/tiff_sequence/tomo77_142.tiff new file mode 100644 index 0000000..cd5ceab Binary files /dev/null and b/data/tiff_sequence/tomo77_142.tiff differ diff --git a/data/tiff_sequence/tomo77_143.tiff b/data/tiff_sequence/tomo77_143.tiff new file mode 100644 index 0000000..bb7aa8c Binary files /dev/null and b/data/tiff_sequence/tomo77_143.tiff differ diff --git a/data/tiff_sequence/tomo77_144.tiff b/data/tiff_sequence/tomo77_144.tiff new file mode 100644 index 0000000..5c2f551 Binary files /dev/null and b/data/tiff_sequence/tomo77_144.tiff differ diff --git a/data/tiff_sequence/tomo77_145.tiff b/data/tiff_sequence/tomo77_145.tiff new file mode 100644 index 0000000..bdd5841 Binary files /dev/null and b/data/tiff_sequence/tomo77_145.tiff differ diff --git a/data/tiff_sequence/tomo77_146.tiff b/data/tiff_sequence/tomo77_146.tiff new file mode 100644 index 0000000..136bef7 Binary files /dev/null and b/data/tiff_sequence/tomo77_146.tiff differ diff --git a/data/tiff_sequence/tomo77_147.tiff b/data/tiff_sequence/tomo77_147.tiff new file mode 100644 index 0000000..a9588aa Binary files /dev/null and b/data/tiff_sequence/tomo77_147.tiff differ diff --git a/data/tiff_sequence/tomo77_148.tiff b/data/tiff_sequence/tomo77_148.tiff new file mode 100644 index 0000000..64119f2 Binary files /dev/null and b/data/tiff_sequence/tomo77_148.tiff differ diff --git a/data/tiff_sequence/tomo77_149.tiff b/data/tiff_sequence/tomo77_149.tiff new file mode 100644 index 0000000..8393daf Binary files /dev/null and b/data/tiff_sequence/tomo77_149.tiff differ diff --git a/data/tiff_sequence/tomo77_150.tiff b/data/tiff_sequence/tomo77_150.tiff new file mode 100644 index 0000000..437a8ac Binary files /dev/null and b/data/tiff_sequence/tomo77_150.tiff differ diff --git a/data/tiff_sequence/tomo77_151.tiff b/data/tiff_sequence/tomo77_151.tiff new file mode 100644 index 0000000..cda9d2b Binary files 
/dev/null and b/data/tiff_sequence/tomo77_151.tiff differ diff --git a/data/tiff_sequence/tomo77_152.tiff b/data/tiff_sequence/tomo77_152.tiff new file mode 100644 index 0000000..bc24000 Binary files /dev/null and b/data/tiff_sequence/tomo77_152.tiff differ diff --git a/data/tiff_sequence/tomo77_153.tiff b/data/tiff_sequence/tomo77_153.tiff new file mode 100644 index 0000000..c8a6c6e Binary files /dev/null and b/data/tiff_sequence/tomo77_153.tiff differ diff --git a/data/tiff_sequence/tomo77_154.tiff b/data/tiff_sequence/tomo77_154.tiff new file mode 100644 index 0000000..d205258 Binary files /dev/null and b/data/tiff_sequence/tomo77_154.tiff differ diff --git a/data/tiff_sequence/tomo77_155.tiff b/data/tiff_sequence/tomo77_155.tiff new file mode 100644 index 0000000..51090dd Binary files /dev/null and b/data/tiff_sequence/tomo77_155.tiff differ diff --git a/data/tiff_sequence/tomo77_156.tiff b/data/tiff_sequence/tomo77_156.tiff new file mode 100644 index 0000000..2960144 Binary files /dev/null and b/data/tiff_sequence/tomo77_156.tiff differ diff --git a/data/tiff_sequence/tomo77_157.tiff b/data/tiff_sequence/tomo77_157.tiff new file mode 100644 index 0000000..0eaaf74 Binary files /dev/null and b/data/tiff_sequence/tomo77_157.tiff differ diff --git a/data/tiff_sequence/tomo77_158.tiff b/data/tiff_sequence/tomo77_158.tiff new file mode 100644 index 0000000..782ea86 Binary files /dev/null and b/data/tiff_sequence/tomo77_158.tiff differ diff --git a/data/tiff_sequence/tomo77_159.tiff b/data/tiff_sequence/tomo77_159.tiff new file mode 100644 index 0000000..a886501 Binary files /dev/null and b/data/tiff_sequence/tomo77_159.tiff differ diff --git a/data/tiff_sequence/tomo77_160.tiff b/data/tiff_sequence/tomo77_160.tiff new file mode 100644 index 0000000..057c412 Binary files /dev/null and b/data/tiff_sequence/tomo77_160.tiff differ diff --git a/data/tiff_sequence/tomo77_161.tiff b/data/tiff_sequence/tomo77_161.tiff new file mode 100644 index 0000000..dc48a21 Binary files 
/dev/null and b/data/tiff_sequence/tomo77_161.tiff differ diff --git a/data/tiff_sequence/tomo77_162.tiff b/data/tiff_sequence/tomo77_162.tiff new file mode 100644 index 0000000..c10c85c Binary files /dev/null and b/data/tiff_sequence/tomo77_162.tiff differ diff --git a/data/tiff_sequence/tomo77_163.tiff b/data/tiff_sequence/tomo77_163.tiff new file mode 100644 index 0000000..58d1204 Binary files /dev/null and b/data/tiff_sequence/tomo77_163.tiff differ diff --git a/data/tiff_sequence/tomo77_164.tiff b/data/tiff_sequence/tomo77_164.tiff new file mode 100644 index 0000000..e7ffbc0 Binary files /dev/null and b/data/tiff_sequence/tomo77_164.tiff differ diff --git a/data/tiff_sequence/tomo77_165.tiff b/data/tiff_sequence/tomo77_165.tiff new file mode 100644 index 0000000..06c8947 Binary files /dev/null and b/data/tiff_sequence/tomo77_165.tiff differ diff --git a/data/tiff_sequence/tomo77_166.tiff b/data/tiff_sequence/tomo77_166.tiff new file mode 100644 index 0000000..e4f6a92 Binary files /dev/null and b/data/tiff_sequence/tomo77_166.tiff differ diff --git a/data/tiff_sequence/tomo77_167.tiff b/data/tiff_sequence/tomo77_167.tiff new file mode 100644 index 0000000..20204e7 Binary files /dev/null and b/data/tiff_sequence/tomo77_167.tiff differ diff --git a/data/tiff_sequence/tomo77_168.tiff b/data/tiff_sequence/tomo77_168.tiff new file mode 100644 index 0000000..053e604 Binary files /dev/null and b/data/tiff_sequence/tomo77_168.tiff differ diff --git a/data/tiff_sequence/tomo77_169.tiff b/data/tiff_sequence/tomo77_169.tiff new file mode 100644 index 0000000..c4f6aec Binary files /dev/null and b/data/tiff_sequence/tomo77_169.tiff differ diff --git a/data/tiff_sequence/tomo77_170.tiff b/data/tiff_sequence/tomo77_170.tiff new file mode 100644 index 0000000..9589d10 Binary files /dev/null and b/data/tiff_sequence/tomo77_170.tiff differ diff --git a/data/tiff_sequence/tomo77_171.tiff b/data/tiff_sequence/tomo77_171.tiff new file mode 100644 index 0000000..6bbaffe Binary files 
/dev/null and b/data/tiff_sequence/tomo77_171.tiff differ diff --git a/data/tiff_sequence/tomo77_172.tiff b/data/tiff_sequence/tomo77_172.tiff new file mode 100644 index 0000000..c577538 Binary files /dev/null and b/data/tiff_sequence/tomo77_172.tiff differ diff --git a/data/tiff_sequence/tomo77_173.tiff b/data/tiff_sequence/tomo77_173.tiff new file mode 100644 index 0000000..6178e5f Binary files /dev/null and b/data/tiff_sequence/tomo77_173.tiff differ diff --git a/data/tiff_sequence/tomo77_174.tiff b/data/tiff_sequence/tomo77_174.tiff new file mode 100644 index 0000000..c49040c Binary files /dev/null and b/data/tiff_sequence/tomo77_174.tiff differ diff --git a/data/tiff_sequence/tomo77_175.tiff b/data/tiff_sequence/tomo77_175.tiff new file mode 100644 index 0000000..105b697 Binary files /dev/null and b/data/tiff_sequence/tomo77_175.tiff differ diff --git a/data/tiff_sequence/tomo77_176.tiff b/data/tiff_sequence/tomo77_176.tiff new file mode 100644 index 0000000..d4802ca Binary files /dev/null and b/data/tiff_sequence/tomo77_176.tiff differ diff --git a/data/tiff_sequence/tomo77_177.tiff b/data/tiff_sequence/tomo77_177.tiff new file mode 100644 index 0000000..2d88161 Binary files /dev/null and b/data/tiff_sequence/tomo77_177.tiff differ diff --git a/data/tiff_sequence/tomo77_178.tiff b/data/tiff_sequence/tomo77_178.tiff new file mode 100644 index 0000000..4e8b37c Binary files /dev/null and b/data/tiff_sequence/tomo77_178.tiff differ diff --git a/data/tiff_sequence/tomo77_179.tiff b/data/tiff_sequence/tomo77_179.tiff new file mode 100644 index 0000000..6f04337 Binary files /dev/null and b/data/tiff_sequence/tomo77_179.tiff differ diff --git a/data/tiff_sequence/tomo77_180.tiff b/data/tiff_sequence/tomo77_180.tiff new file mode 100644 index 0000000..a598ae1 Binary files /dev/null and b/data/tiff_sequence/tomo77_180.tiff differ diff --git a/data/tiff_sequence/tomo77_181.tiff b/data/tiff_sequence/tomo77_181.tiff new file mode 100644 index 0000000..4bd7161 Binary files 
/dev/null and b/data/tiff_sequence/tomo77_181.tiff differ diff --git a/data/tiff_sequence/tomo77_182.tiff b/data/tiff_sequence/tomo77_182.tiff new file mode 100644 index 0000000..7b1b72b Binary files /dev/null and b/data/tiff_sequence/tomo77_182.tiff differ diff --git a/data/tiff_sequence/tomo77_183.tiff b/data/tiff_sequence/tomo77_183.tiff new file mode 100644 index 0000000..a094f57 Binary files /dev/null and b/data/tiff_sequence/tomo77_183.tiff differ diff --git a/data/tiff_sequence/tomo77_184.tiff b/data/tiff_sequence/tomo77_184.tiff new file mode 100644 index 0000000..67b1a8a Binary files /dev/null and b/data/tiff_sequence/tomo77_184.tiff differ diff --git a/data/tiff_sequence/tomo77_185.tiff b/data/tiff_sequence/tomo77_185.tiff new file mode 100644 index 0000000..52b730c Binary files /dev/null and b/data/tiff_sequence/tomo77_185.tiff differ diff --git a/data/tiff_sequence/tomo77_186.tiff b/data/tiff_sequence/tomo77_186.tiff new file mode 100644 index 0000000..fccd874 Binary files /dev/null and b/data/tiff_sequence/tomo77_186.tiff differ diff --git a/data/tiff_sequence/tomo77_187.tiff b/data/tiff_sequence/tomo77_187.tiff new file mode 100644 index 0000000..3a9d82d Binary files /dev/null and b/data/tiff_sequence/tomo77_187.tiff differ diff --git a/data/tiff_sequence/tomo77_188.tiff b/data/tiff_sequence/tomo77_188.tiff new file mode 100644 index 0000000..e26afe2 Binary files /dev/null and b/data/tiff_sequence/tomo77_188.tiff differ diff --git a/data/tiff_sequence/tomo77_189.tiff b/data/tiff_sequence/tomo77_189.tiff new file mode 100644 index 0000000..3d369ab Binary files /dev/null and b/data/tiff_sequence/tomo77_189.tiff differ diff --git a/data/tiff_sequence/tomo77_190.tiff b/data/tiff_sequence/tomo77_190.tiff new file mode 100644 index 0000000..8d90781 Binary files /dev/null and b/data/tiff_sequence/tomo77_190.tiff differ diff --git a/data/tiff_sequence/tomo77_191.tiff b/data/tiff_sequence/tomo77_191.tiff new file mode 100644 index 0000000..80d5cd0 Binary files 
/dev/null and b/data/tiff_sequence/tomo77_191.tiff differ diff --git a/data/tiff_sequence/tomo77_192.tiff b/data/tiff_sequence/tomo77_192.tiff new file mode 100644 index 0000000..4de3238 Binary files /dev/null and b/data/tiff_sequence/tomo77_192.tiff differ diff --git a/data/tiff_sequence/tomo77_193.tiff b/data/tiff_sequence/tomo77_193.tiff new file mode 100644 index 0000000..4dfe0a9 Binary files /dev/null and b/data/tiff_sequence/tomo77_193.tiff differ diff --git a/data/tiff_sequence/tomo77_194.tiff b/data/tiff_sequence/tomo77_194.tiff new file mode 100644 index 0000000..e12b975 Binary files /dev/null and b/data/tiff_sequence/tomo77_194.tiff differ diff --git a/data/tiff_sequence/tomo77_195.tiff b/data/tiff_sequence/tomo77_195.tiff new file mode 100644 index 0000000..bf97c38 Binary files /dev/null and b/data/tiff_sequence/tomo77_195.tiff differ diff --git a/data/tiff_sequence/tomo77_196.tiff b/data/tiff_sequence/tomo77_196.tiff new file mode 100644 index 0000000..52716da Binary files /dev/null and b/data/tiff_sequence/tomo77_196.tiff differ diff --git a/data/tiff_sequence/tomo77_197.tiff b/data/tiff_sequence/tomo77_197.tiff new file mode 100644 index 0000000..2a2a716 Binary files /dev/null and b/data/tiff_sequence/tomo77_197.tiff differ diff --git a/data/tiff_sequence/tomo77_198.tiff b/data/tiff_sequence/tomo77_198.tiff new file mode 100644 index 0000000..7f18fc5 Binary files /dev/null and b/data/tiff_sequence/tomo77_198.tiff differ diff --git a/data/tiff_sequence/tomo77_199.tiff b/data/tiff_sequence/tomo77_199.tiff new file mode 100644 index 0000000..5689fae Binary files /dev/null and b/data/tiff_sequence/tomo77_199.tiff differ diff --git a/data/tiff_sequence/tomo77_200.tiff b/data/tiff_sequence/tomo77_200.tiff new file mode 100644 index 0000000..feea441 Binary files /dev/null and b/data/tiff_sequence/tomo77_200.tiff differ diff --git a/data/tiff_sequence/tomo77_201.tiff b/data/tiff_sequence/tomo77_201.tiff new file mode 100644 index 0000000..ae9dd39 Binary files 
/dev/null and b/data/tiff_sequence/tomo77_201.tiff differ diff --git a/data/tiff_sequence/tomo77_202.tiff b/data/tiff_sequence/tomo77_202.tiff new file mode 100644 index 0000000..72bc566 Binary files /dev/null and b/data/tiff_sequence/tomo77_202.tiff differ diff --git a/data/tiff_sequence/tomo77_203.tiff b/data/tiff_sequence/tomo77_203.tiff new file mode 100644 index 0000000..2f483bd Binary files /dev/null and b/data/tiff_sequence/tomo77_203.tiff differ diff --git a/data/tiff_sequence/tomo77_204.tiff b/data/tiff_sequence/tomo77_204.tiff new file mode 100644 index 0000000..99a92bc Binary files /dev/null and b/data/tiff_sequence/tomo77_204.tiff differ diff --git a/data/tiff_sequence/tomo77_205.tiff b/data/tiff_sequence/tomo77_205.tiff new file mode 100644 index 0000000..ccdc1e7 Binary files /dev/null and b/data/tiff_sequence/tomo77_205.tiff differ diff --git a/data/tiff_sequence/tomo77_206.tiff b/data/tiff_sequence/tomo77_206.tiff new file mode 100644 index 0000000..409e8fd Binary files /dev/null and b/data/tiff_sequence/tomo77_206.tiff differ diff --git a/data/tiff_sequence/tomo77_207.tiff b/data/tiff_sequence/tomo77_207.tiff new file mode 100644 index 0000000..268f552 Binary files /dev/null and b/data/tiff_sequence/tomo77_207.tiff differ diff --git a/data/tiff_sequence/tomo77_208.tiff b/data/tiff_sequence/tomo77_208.tiff new file mode 100644 index 0000000..5643d6d Binary files /dev/null and b/data/tiff_sequence/tomo77_208.tiff differ diff --git a/data/tiff_sequence/tomo77_209.tiff b/data/tiff_sequence/tomo77_209.tiff new file mode 100644 index 0000000..d1bc886 Binary files /dev/null and b/data/tiff_sequence/tomo77_209.tiff differ diff --git a/data/tiff_sequence/tomo77_210.tiff b/data/tiff_sequence/tomo77_210.tiff new file mode 100644 index 0000000..680c4e0 Binary files /dev/null and b/data/tiff_sequence/tomo77_210.tiff differ diff --git a/data/tiff_sequence/tomo77_211.tiff b/data/tiff_sequence/tomo77_211.tiff new file mode 100644 index 0000000..cfbbd9b Binary files 
/dev/null and b/data/tiff_sequence/tomo77_211.tiff differ diff --git a/data/tiff_sequence/tomo77_212.tiff b/data/tiff_sequence/tomo77_212.tiff new file mode 100644 index 0000000..c291eb4 Binary files /dev/null and b/data/tiff_sequence/tomo77_212.tiff differ diff --git a/data/tiff_sequence/tomo77_213.tiff b/data/tiff_sequence/tomo77_213.tiff new file mode 100644 index 0000000..3974e07 Binary files /dev/null and b/data/tiff_sequence/tomo77_213.tiff differ diff --git a/data/tiff_sequence/tomo77_214.tiff b/data/tiff_sequence/tomo77_214.tiff new file mode 100644 index 0000000..c00ad6d Binary files /dev/null and b/data/tiff_sequence/tomo77_214.tiff differ diff --git a/data/tiff_sequence/tomo77_215.tiff b/data/tiff_sequence/tomo77_215.tiff new file mode 100644 index 0000000..d9f5d81 Binary files /dev/null and b/data/tiff_sequence/tomo77_215.tiff differ diff --git a/data/tiff_sequence/tomo77_216.tiff b/data/tiff_sequence/tomo77_216.tiff new file mode 100644 index 0000000..8ae6c5f Binary files /dev/null and b/data/tiff_sequence/tomo77_216.tiff differ diff --git a/data/tiff_sequence/tomo77_217.tiff b/data/tiff_sequence/tomo77_217.tiff new file mode 100644 index 0000000..e04dbd7 Binary files /dev/null and b/data/tiff_sequence/tomo77_217.tiff differ diff --git a/data/tiff_sequence/tomo77_218.tiff b/data/tiff_sequence/tomo77_218.tiff new file mode 100644 index 0000000..30c1f45 Binary files /dev/null and b/data/tiff_sequence/tomo77_218.tiff differ diff --git a/data/tiff_sequence/tomo77_219.tiff b/data/tiff_sequence/tomo77_219.tiff new file mode 100644 index 0000000..01d6131 Binary files /dev/null and b/data/tiff_sequence/tomo77_219.tiff differ diff --git a/data/tiff_sequence/tomo77_220.tiff b/data/tiff_sequence/tomo77_220.tiff new file mode 100644 index 0000000..a4ef303 Binary files /dev/null and b/data/tiff_sequence/tomo77_220.tiff differ diff --git a/data/tiff_sequence/tomo77_221.tiff b/data/tiff_sequence/tomo77_221.tiff new file mode 100644 index 0000000..3446655 Binary files 
/dev/null and b/data/tiff_sequence/tomo77_221.tiff differ diff --git a/data/tiff_sequence/tomo77_222.tiff b/data/tiff_sequence/tomo77_222.tiff new file mode 100644 index 0000000..2ab4a98 Binary files /dev/null and b/data/tiff_sequence/tomo77_222.tiff differ diff --git a/data/tiff_sequence/tomo77_223.tiff b/data/tiff_sequence/tomo77_223.tiff new file mode 100644 index 0000000..a1241d4 Binary files /dev/null and b/data/tiff_sequence/tomo77_223.tiff differ diff --git a/data/tiff_sequence/tomo77_224.tiff b/data/tiff_sequence/tomo77_224.tiff new file mode 100644 index 0000000..9789383 Binary files /dev/null and b/data/tiff_sequence/tomo77_224.tiff differ diff --git a/data/tiff_sequence/tomo77_225.tiff b/data/tiff_sequence/tomo77_225.tiff new file mode 100644 index 0000000..5700253 Binary files /dev/null and b/data/tiff_sequence/tomo77_225.tiff differ diff --git a/data/tiff_sequence/tomo77_226.tiff b/data/tiff_sequence/tomo77_226.tiff new file mode 100644 index 0000000..3f31d00 Binary files /dev/null and b/data/tiff_sequence/tomo77_226.tiff differ diff --git a/data/tiff_sequence/tomo77_227.tiff b/data/tiff_sequence/tomo77_227.tiff new file mode 100644 index 0000000..15c3cd6 Binary files /dev/null and b/data/tiff_sequence/tomo77_227.tiff differ diff --git a/data/tiff_sequence/tomo77_228.tiff b/data/tiff_sequence/tomo77_228.tiff new file mode 100644 index 0000000..9df2375 Binary files /dev/null and b/data/tiff_sequence/tomo77_228.tiff differ diff --git a/data/tiff_sequence/tomo77_229.tiff b/data/tiff_sequence/tomo77_229.tiff new file mode 100644 index 0000000..fd7e635 Binary files /dev/null and b/data/tiff_sequence/tomo77_229.tiff differ diff --git a/data/tiff_sequence/tomo77_230.tiff b/data/tiff_sequence/tomo77_230.tiff new file mode 100644 index 0000000..d8f529c Binary files /dev/null and b/data/tiff_sequence/tomo77_230.tiff differ diff --git a/data/tiff_sequence/tomo77_231.tiff b/data/tiff_sequence/tomo77_231.tiff new file mode 100644 index 0000000..c80ee88 Binary files 
/dev/null and b/data/tiff_sequence/tomo77_231.tiff differ diff --git a/data/tiff_sequence/tomo77_232.tiff b/data/tiff_sequence/tomo77_232.tiff new file mode 100644 index 0000000..7b61716 Binary files /dev/null and b/data/tiff_sequence/tomo77_232.tiff differ diff --git a/data/tiff_sequence/tomo77_233.tiff b/data/tiff_sequence/tomo77_233.tiff new file mode 100644 index 0000000..a6ee8a7 Binary files /dev/null and b/data/tiff_sequence/tomo77_233.tiff differ diff --git a/data/tiff_sequence/tomo77_234.tiff b/data/tiff_sequence/tomo77_234.tiff new file mode 100644 index 0000000..9b4f280 Binary files /dev/null and b/data/tiff_sequence/tomo77_234.tiff differ diff --git a/data/tiff_sequence/tomo77_235.tiff b/data/tiff_sequence/tomo77_235.tiff new file mode 100644 index 0000000..d98b38f Binary files /dev/null and b/data/tiff_sequence/tomo77_235.tiff differ diff --git a/data/tiff_sequence/tomo77_236.tiff b/data/tiff_sequence/tomo77_236.tiff new file mode 100644 index 0000000..d803637 Binary files /dev/null and b/data/tiff_sequence/tomo77_236.tiff differ diff --git a/data/tiff_sequence/tomo77_237.tiff b/data/tiff_sequence/tomo77_237.tiff new file mode 100644 index 0000000..5fafecf Binary files /dev/null and b/data/tiff_sequence/tomo77_237.tiff differ diff --git a/data/tiff_sequence/tomo77_238.tiff b/data/tiff_sequence/tomo77_238.tiff new file mode 100644 index 0000000..33a265f Binary files /dev/null and b/data/tiff_sequence/tomo77_238.tiff differ diff --git a/data/tiff_sequence/tomo77_239.tiff b/data/tiff_sequence/tomo77_239.tiff new file mode 100644 index 0000000..badab39 Binary files /dev/null and b/data/tiff_sequence/tomo77_239.tiff differ diff --git a/data/tiff_sequence/tomo77_240.tiff b/data/tiff_sequence/tomo77_240.tiff new file mode 100644 index 0000000..b298915 Binary files /dev/null and b/data/tiff_sequence/tomo77_240.tiff differ diff --git a/data/tiff_sequence/tomo77_241.tiff b/data/tiff_sequence/tomo77_241.tiff new file mode 100644 index 0000000..49d4f9b Binary files 
/dev/null and b/data/tiff_sequence/tomo77_241.tiff differ diff --git a/data/tiff_sequence/tomo77_242.tiff b/data/tiff_sequence/tomo77_242.tiff new file mode 100644 index 0000000..94c5a47 Binary files /dev/null and b/data/tiff_sequence/tomo77_242.tiff differ diff --git a/data/tiff_sequence/tomo77_243.tiff b/data/tiff_sequence/tomo77_243.tiff new file mode 100644 index 0000000..781b3bb Binary files /dev/null and b/data/tiff_sequence/tomo77_243.tiff differ diff --git a/data/tiff_sequence/tomo77_244.tiff b/data/tiff_sequence/tomo77_244.tiff new file mode 100644 index 0000000..d78b25a Binary files /dev/null and b/data/tiff_sequence/tomo77_244.tiff differ diff --git a/data/tiff_sequence/tomo77_245.tiff b/data/tiff_sequence/tomo77_245.tiff new file mode 100644 index 0000000..5b88793 Binary files /dev/null and b/data/tiff_sequence/tomo77_245.tiff differ diff --git a/data/tiff_sequence/tomo77_246.tiff b/data/tiff_sequence/tomo77_246.tiff new file mode 100644 index 0000000..5540cd2 Binary files /dev/null and b/data/tiff_sequence/tomo77_246.tiff differ diff --git a/data/tiff_sequence/tomo77_247.tiff b/data/tiff_sequence/tomo77_247.tiff new file mode 100644 index 0000000..f61b2eb Binary files /dev/null and b/data/tiff_sequence/tomo77_247.tiff differ diff --git a/data/tiff_sequence/tomo77_248.tiff b/data/tiff_sequence/tomo77_248.tiff new file mode 100644 index 0000000..7d7c48a Binary files /dev/null and b/data/tiff_sequence/tomo77_248.tiff differ diff --git a/data/tiff_sequence/tomo77_249.tiff b/data/tiff_sequence/tomo77_249.tiff new file mode 100644 index 0000000..df7c3d7 Binary files /dev/null and b/data/tiff_sequence/tomo77_249.tiff differ diff --git a/data/tiff_sequence/tomo77_250.tiff b/data/tiff_sequence/tomo77_250.tiff new file mode 100644 index 0000000..a86ce76 Binary files /dev/null and b/data/tiff_sequence/tomo77_250.tiff differ diff --git a/data/tiff_sequence/tomo77_251.tiff b/data/tiff_sequence/tomo77_251.tiff new file mode 100644 index 0000000..ddb1da4 Binary files 
/dev/null and b/data/tiff_sequence/tomo77_251.tiff differ diff --git a/data/tiff_sequence/tomo77_252.tiff b/data/tiff_sequence/tomo77_252.tiff new file mode 100644 index 0000000..b832c1e Binary files /dev/null and b/data/tiff_sequence/tomo77_252.tiff differ diff --git a/data/tiff_sequence/tomo77_253.tiff b/data/tiff_sequence/tomo77_253.tiff new file mode 100644 index 0000000..35ca056 Binary files /dev/null and b/data/tiff_sequence/tomo77_253.tiff differ diff --git a/data/tiff_sequence/tomo77_254.tiff b/data/tiff_sequence/tomo77_254.tiff new file mode 100644 index 0000000..aded507 Binary files /dev/null and b/data/tiff_sequence/tomo77_254.tiff differ diff --git a/data/tiff_sequence/tomo77_255.tiff b/data/tiff_sequence/tomo77_255.tiff new file mode 100644 index 0000000..f2c1ef8 Binary files /dev/null and b/data/tiff_sequence/tomo77_255.tiff differ diff --git a/data/tiff_sequence/tomo77_256.tiff b/data/tiff_sequence/tomo77_256.tiff new file mode 100644 index 0000000..0cb0012 Binary files /dev/null and b/data/tiff_sequence/tomo77_256.tiff differ diff --git a/data/tiff_sequence/tomo77_257.tiff b/data/tiff_sequence/tomo77_257.tiff new file mode 100644 index 0000000..3e4e9ae Binary files /dev/null and b/data/tiff_sequence/tomo77_257.tiff differ diff --git a/data/tiff_sequence/tomo77_258.tiff b/data/tiff_sequence/tomo77_258.tiff new file mode 100644 index 0000000..f06979d Binary files /dev/null and b/data/tiff_sequence/tomo77_258.tiff differ diff --git a/data/tiff_sequence/tomo77_259.tiff b/data/tiff_sequence/tomo77_259.tiff new file mode 100644 index 0000000..dacd9fb Binary files /dev/null and b/data/tiff_sequence/tomo77_259.tiff differ diff --git a/data/tiff_sequence/tomo77_260.tiff b/data/tiff_sequence/tomo77_260.tiff new file mode 100644 index 0000000..6b083f3 Binary files /dev/null and b/data/tiff_sequence/tomo77_260.tiff differ diff --git a/data/tiff_sequence/tomo77_261.tiff b/data/tiff_sequence/tomo77_261.tiff new file mode 100644 index 0000000..9a7afe7 Binary files 
/dev/null and b/data/tiff_sequence/tomo77_261.tiff differ diff --git a/data/tiff_sequence/tomo77_262.tiff b/data/tiff_sequence/tomo77_262.tiff new file mode 100644 index 0000000..a857389 Binary files /dev/null and b/data/tiff_sequence/tomo77_262.tiff differ diff --git a/data/tiff_sequence/tomo77_263.tiff b/data/tiff_sequence/tomo77_263.tiff new file mode 100644 index 0000000..f1b576f Binary files /dev/null and b/data/tiff_sequence/tomo77_263.tiff differ diff --git a/data/tiff_sequence/tomo77_264.tiff b/data/tiff_sequence/tomo77_264.tiff new file mode 100644 index 0000000..12600e9 Binary files /dev/null and b/data/tiff_sequence/tomo77_264.tiff differ diff --git a/data/tiff_sequence/tomo77_265.tiff b/data/tiff_sequence/tomo77_265.tiff new file mode 100644 index 0000000..9cc56ce Binary files /dev/null and b/data/tiff_sequence/tomo77_265.tiff differ diff --git a/data/tiff_sequence/tomo77_266.tiff b/data/tiff_sequence/tomo77_266.tiff new file mode 100644 index 0000000..c3d0854 Binary files /dev/null and b/data/tiff_sequence/tomo77_266.tiff differ diff --git a/data/tiff_sequence/tomo77_267.tiff b/data/tiff_sequence/tomo77_267.tiff new file mode 100644 index 0000000..5ade878 Binary files /dev/null and b/data/tiff_sequence/tomo77_267.tiff differ diff --git a/data/tiff_sequence/tomo77_268.tiff b/data/tiff_sequence/tomo77_268.tiff new file mode 100644 index 0000000..5903fc5 Binary files /dev/null and b/data/tiff_sequence/tomo77_268.tiff differ diff --git a/data/tiff_sequence/tomo77_269.tiff b/data/tiff_sequence/tomo77_269.tiff new file mode 100644 index 0000000..bfa790a Binary files /dev/null and b/data/tiff_sequence/tomo77_269.tiff differ diff --git a/data/tiff_sequence/tomo77_270.tiff b/data/tiff_sequence/tomo77_270.tiff new file mode 100644 index 0000000..ad6bf19 Binary files /dev/null and b/data/tiff_sequence/tomo77_270.tiff differ diff --git a/data/tiff_sequence/tomo77_271.tiff b/data/tiff_sequence/tomo77_271.tiff new file mode 100644 index 0000000..33dac47 Binary files 
/dev/null and b/data/tiff_sequence/tomo77_271.tiff differ diff --git a/data/tiff_sequence/tomo77_272.tiff b/data/tiff_sequence/tomo77_272.tiff new file mode 100644 index 0000000..562231b Binary files /dev/null and b/data/tiff_sequence/tomo77_272.tiff differ diff --git a/data/tiff_sequence/tomo77_273.tiff b/data/tiff_sequence/tomo77_273.tiff new file mode 100644 index 0000000..923d764 Binary files /dev/null and b/data/tiff_sequence/tomo77_273.tiff differ diff --git a/data/tiff_sequence/tomo77_274.tiff b/data/tiff_sequence/tomo77_274.tiff new file mode 100644 index 0000000..3def0ca Binary files /dev/null and b/data/tiff_sequence/tomo77_274.tiff differ diff --git a/data/tiff_sequence/tomo77_275.tiff b/data/tiff_sequence/tomo77_275.tiff new file mode 100644 index 0000000..858a5d0 Binary files /dev/null and b/data/tiff_sequence/tomo77_275.tiff differ diff --git a/data/tiff_sequence/tomo77_276.tiff b/data/tiff_sequence/tomo77_276.tiff new file mode 100644 index 0000000..3b8a3a0 Binary files /dev/null and b/data/tiff_sequence/tomo77_276.tiff differ diff --git a/data/tiff_sequence/tomo77_277.tiff b/data/tiff_sequence/tomo77_277.tiff new file mode 100644 index 0000000..ff8bfa3 Binary files /dev/null and b/data/tiff_sequence/tomo77_277.tiff differ diff --git a/data/tiff_sequence/tomo77_278.tiff b/data/tiff_sequence/tomo77_278.tiff new file mode 100644 index 0000000..716751f Binary files /dev/null and b/data/tiff_sequence/tomo77_278.tiff differ diff --git a/data/tiff_sequence/tomo77_279.tiff b/data/tiff_sequence/tomo77_279.tiff new file mode 100644 index 0000000..d684c65 Binary files /dev/null and b/data/tiff_sequence/tomo77_279.tiff differ diff --git a/data/tiff_sequence/tomo77_280.tiff b/data/tiff_sequence/tomo77_280.tiff new file mode 100644 index 0000000..2418709 Binary files /dev/null and b/data/tiff_sequence/tomo77_280.tiff differ diff --git a/data/tiff_sequence/tomo77_281.tiff b/data/tiff_sequence/tomo77_281.tiff new file mode 100644 index 0000000..81e34bb Binary files 
/dev/null and b/data/tiff_sequence/tomo77_281.tiff differ diff --git a/data/tiff_sequence/tomo77_282.tiff b/data/tiff_sequence/tomo77_282.tiff new file mode 100644 index 0000000..421cc98 Binary files /dev/null and b/data/tiff_sequence/tomo77_282.tiff differ diff --git a/data/tiff_sequence/tomo77_283.tiff b/data/tiff_sequence/tomo77_283.tiff new file mode 100644 index 0000000..d3e5324 Binary files /dev/null and b/data/tiff_sequence/tomo77_283.tiff differ diff --git a/data/tiff_sequence/tomo77_284.tiff b/data/tiff_sequence/tomo77_284.tiff new file mode 100644 index 0000000..8d58e44 Binary files /dev/null and b/data/tiff_sequence/tomo77_284.tiff differ diff --git a/data/tiff_sequence/tomo77_285.tiff b/data/tiff_sequence/tomo77_285.tiff new file mode 100644 index 0000000..d7f65d8 Binary files /dev/null and b/data/tiff_sequence/tomo77_285.tiff differ diff --git a/data/tiff_sequence/tomo77_286.tiff b/data/tiff_sequence/tomo77_286.tiff new file mode 100644 index 0000000..7fca167 Binary files /dev/null and b/data/tiff_sequence/tomo77_286.tiff differ diff --git a/data/tiff_sequence/tomo77_287.tiff b/data/tiff_sequence/tomo77_287.tiff new file mode 100644 index 0000000..775baed Binary files /dev/null and b/data/tiff_sequence/tomo77_287.tiff differ diff --git a/data/tiff_sequence/tomo77_288.tiff b/data/tiff_sequence/tomo77_288.tiff new file mode 100644 index 0000000..70cb8ab Binary files /dev/null and b/data/tiff_sequence/tomo77_288.tiff differ diff --git a/data/tiff_sequence/tomo77_289.tiff b/data/tiff_sequence/tomo77_289.tiff new file mode 100644 index 0000000..5ffb9f4 Binary files /dev/null and b/data/tiff_sequence/tomo77_289.tiff differ diff --git a/data/tiff_sequence/tomo77_290.tiff b/data/tiff_sequence/tomo77_290.tiff new file mode 100644 index 0000000..0977d19 Binary files /dev/null and b/data/tiff_sequence/tomo77_290.tiff differ diff --git a/data/tiff_sequence/tomo77_291.tiff b/data/tiff_sequence/tomo77_291.tiff new file mode 100644 index 0000000..45cd273 Binary files 
/dev/null and b/data/tiff_sequence/tomo77_291.tiff differ diff --git a/data/tiff_sequence/tomo77_292.tiff b/data/tiff_sequence/tomo77_292.tiff new file mode 100644 index 0000000..0a39be3 Binary files /dev/null and b/data/tiff_sequence/tomo77_292.tiff differ diff --git a/data/tiff_sequence/tomo77_293.tiff b/data/tiff_sequence/tomo77_293.tiff new file mode 100644 index 0000000..d5080ca Binary files /dev/null and b/data/tiff_sequence/tomo77_293.tiff differ diff --git a/data/tiff_sequence/tomo77_294.tiff b/data/tiff_sequence/tomo77_294.tiff new file mode 100644 index 0000000..8f1b6a0 Binary files /dev/null and b/data/tiff_sequence/tomo77_294.tiff differ diff --git a/data/tiff_sequence/tomo77_295.tiff b/data/tiff_sequence/tomo77_295.tiff new file mode 100644 index 0000000..f776ab1 Binary files /dev/null and b/data/tiff_sequence/tomo77_295.tiff differ diff --git a/data/tiff_sequence/tomo77_296.tiff b/data/tiff_sequence/tomo77_296.tiff new file mode 100644 index 0000000..39b1230 Binary files /dev/null and b/data/tiff_sequence/tomo77_296.tiff differ diff --git a/data/tiff_sequence/tomo77_297.tiff b/data/tiff_sequence/tomo77_297.tiff new file mode 100644 index 0000000..0dea53e Binary files /dev/null and b/data/tiff_sequence/tomo77_297.tiff differ diff --git a/data/tiff_sequence/tomo77_298.tiff b/data/tiff_sequence/tomo77_298.tiff new file mode 100644 index 0000000..9beca1e Binary files /dev/null and b/data/tiff_sequence/tomo77_298.tiff differ diff --git a/data/tiff_sequence/tomo77_299.tiff b/data/tiff_sequence/tomo77_299.tiff new file mode 100644 index 0000000..52827ba Binary files /dev/null and b/data/tiff_sequence/tomo77_299.tiff differ diff --git a/data/tiff_sequence/tomo77_300.tiff b/data/tiff_sequence/tomo77_300.tiff new file mode 100644 index 0000000..46cc8e6 Binary files /dev/null and b/data/tiff_sequence/tomo77_300.tiff differ diff --git a/data/tiff_sequence/tomo77_301.tiff b/data/tiff_sequence/tomo77_301.tiff new file mode 100644 index 0000000..869df76 Binary files 
/dev/null and b/data/tiff_sequence/tomo77_301.tiff differ diff --git a/data/tiff_sequence/tomo77_302.tiff b/data/tiff_sequence/tomo77_302.tiff new file mode 100644 index 0000000..e7e52d2 Binary files /dev/null and b/data/tiff_sequence/tomo77_302.tiff differ diff --git a/data/tiff_sequence/tomo77_303.tiff b/data/tiff_sequence/tomo77_303.tiff new file mode 100644 index 0000000..5ebaa09 Binary files /dev/null and b/data/tiff_sequence/tomo77_303.tiff differ diff --git a/data/tiff_sequence/tomo77_304.tiff b/data/tiff_sequence/tomo77_304.tiff new file mode 100644 index 0000000..ea2d1c8 Binary files /dev/null and b/data/tiff_sequence/tomo77_304.tiff differ diff --git a/data/tiff_sequence/tomo77_305.tiff b/data/tiff_sequence/tomo77_305.tiff new file mode 100644 index 0000000..f908fbe Binary files /dev/null and b/data/tiff_sequence/tomo77_305.tiff differ diff --git a/data/tiff_sequence/tomo77_306.tiff b/data/tiff_sequence/tomo77_306.tiff new file mode 100644 index 0000000..b102edb Binary files /dev/null and b/data/tiff_sequence/tomo77_306.tiff differ diff --git a/data/tiff_sequence/tomo77_307.tiff b/data/tiff_sequence/tomo77_307.tiff new file mode 100644 index 0000000..6e29bd7 Binary files /dev/null and b/data/tiff_sequence/tomo77_307.tiff differ diff --git a/data/tiff_sequence/tomo77_308.tiff b/data/tiff_sequence/tomo77_308.tiff new file mode 100644 index 0000000..593c279 Binary files /dev/null and b/data/tiff_sequence/tomo77_308.tiff differ diff --git a/data/tiff_sequence/tomo77_309.tiff b/data/tiff_sequence/tomo77_309.tiff new file mode 100644 index 0000000..81b2a08 Binary files /dev/null and b/data/tiff_sequence/tomo77_309.tiff differ diff --git a/data/tiff_sequence/tomo77_310.tiff b/data/tiff_sequence/tomo77_310.tiff new file mode 100644 index 0000000..de0036a Binary files /dev/null and b/data/tiff_sequence/tomo77_310.tiff differ diff --git a/data/tiff_sequence/tomo77_311.tiff b/data/tiff_sequence/tomo77_311.tiff new file mode 100644 index 0000000..a3bb19a Binary files 
/dev/null and b/data/tiff_sequence/tomo77_311.tiff differ diff --git a/data/tiff_sequence/tomo77_312.tiff b/data/tiff_sequence/tomo77_312.tiff new file mode 100644 index 0000000..d99aa15 Binary files /dev/null and b/data/tiff_sequence/tomo77_312.tiff differ diff --git a/data/tiff_sequence/tomo77_313.tiff b/data/tiff_sequence/tomo77_313.tiff new file mode 100644 index 0000000..bb81b58 Binary files /dev/null and b/data/tiff_sequence/tomo77_313.tiff differ diff --git a/data/tiff_sequence/tomo77_314.tiff b/data/tiff_sequence/tomo77_314.tiff new file mode 100644 index 0000000..de8828a Binary files /dev/null and b/data/tiff_sequence/tomo77_314.tiff differ diff --git a/data/tiff_sequence/tomo77_315.tiff b/data/tiff_sequence/tomo77_315.tiff new file mode 100644 index 0000000..49ec6af Binary files /dev/null and b/data/tiff_sequence/tomo77_315.tiff differ diff --git a/data/tiff_sequence/tomo77_316.tiff b/data/tiff_sequence/tomo77_316.tiff new file mode 100644 index 0000000..36ad742 Binary files /dev/null and b/data/tiff_sequence/tomo77_316.tiff differ diff --git a/data/tiff_sequence/tomo77_317.tiff b/data/tiff_sequence/tomo77_317.tiff new file mode 100644 index 0000000..e5f7a35 Binary files /dev/null and b/data/tiff_sequence/tomo77_317.tiff differ diff --git a/data/tiff_sequence/tomo77_318.tiff b/data/tiff_sequence/tomo77_318.tiff new file mode 100644 index 0000000..04e1ae8 Binary files /dev/null and b/data/tiff_sequence/tomo77_318.tiff differ diff --git a/data/tiff_sequence/tomo77_319.tiff b/data/tiff_sequence/tomo77_319.tiff new file mode 100644 index 0000000..54e7ebe Binary files /dev/null and b/data/tiff_sequence/tomo77_319.tiff differ diff --git a/data/tiff_sequence/tomo77_320.tiff b/data/tiff_sequence/tomo77_320.tiff new file mode 100644 index 0000000..941e51c Binary files /dev/null and b/data/tiff_sequence/tomo77_320.tiff differ diff --git a/data/tiff_sequence/tomo77_321.tiff b/data/tiff_sequence/tomo77_321.tiff new file mode 100644 index 0000000..e7a6a5e Binary files 
/dev/null and b/data/tiff_sequence/tomo77_321.tiff differ diff --git a/data/tiff_sequence/tomo77_322.tiff b/data/tiff_sequence/tomo77_322.tiff new file mode 100644 index 0000000..b2d5ee5 Binary files /dev/null and b/data/tiff_sequence/tomo77_322.tiff differ diff --git a/data/tiff_sequence/tomo77_323.tiff b/data/tiff_sequence/tomo77_323.tiff new file mode 100644 index 0000000..462b381 Binary files /dev/null and b/data/tiff_sequence/tomo77_323.tiff differ diff --git a/data/tiff_sequence/tomo77_324.tiff b/data/tiff_sequence/tomo77_324.tiff new file mode 100644 index 0000000..1e090b9 Binary files /dev/null and b/data/tiff_sequence/tomo77_324.tiff differ diff --git a/data/tiff_sequence/tomo77_325.tiff b/data/tiff_sequence/tomo77_325.tiff new file mode 100644 index 0000000..4a29598 Binary files /dev/null and b/data/tiff_sequence/tomo77_325.tiff differ diff --git a/data/tiff_sequence/tomo77_326.tiff b/data/tiff_sequence/tomo77_326.tiff new file mode 100644 index 0000000..c6b186d Binary files /dev/null and b/data/tiff_sequence/tomo77_326.tiff differ diff --git a/data/tiff_sequence/tomo77_327.tiff b/data/tiff_sequence/tomo77_327.tiff new file mode 100644 index 0000000..6bb3adf Binary files /dev/null and b/data/tiff_sequence/tomo77_327.tiff differ diff --git a/data/tiff_sequence/tomo77_328.tiff b/data/tiff_sequence/tomo77_328.tiff new file mode 100644 index 0000000..a2d985e Binary files /dev/null and b/data/tiff_sequence/tomo77_328.tiff differ diff --git a/data/tiff_sequence/tomo77_329.tiff b/data/tiff_sequence/tomo77_329.tiff new file mode 100644 index 0000000..cb93187 Binary files /dev/null and b/data/tiff_sequence/tomo77_329.tiff differ diff --git a/data/tiff_sequence/tomo77_330.tiff b/data/tiff_sequence/tomo77_330.tiff new file mode 100644 index 0000000..9e8e525 Binary files /dev/null and b/data/tiff_sequence/tomo77_330.tiff differ diff --git a/data/tiff_sequence/tomo77_331.tiff b/data/tiff_sequence/tomo77_331.tiff new file mode 100644 index 0000000..91da395 Binary files 
/dev/null and b/data/tiff_sequence/tomo77_331.tiff differ diff --git a/data/tiff_sequence/tomo77_332.tiff b/data/tiff_sequence/tomo77_332.tiff new file mode 100644 index 0000000..f9cb27a Binary files /dev/null and b/data/tiff_sequence/tomo77_332.tiff differ diff --git a/data/tiff_sequence/tomo77_333.tiff b/data/tiff_sequence/tomo77_333.tiff new file mode 100644 index 0000000..242fb6c Binary files /dev/null and b/data/tiff_sequence/tomo77_333.tiff differ diff --git a/data/tiff_sequence/tomo77_334.tiff b/data/tiff_sequence/tomo77_334.tiff new file mode 100644 index 0000000..6f1bf99 Binary files /dev/null and b/data/tiff_sequence/tomo77_334.tiff differ diff --git a/data/tiff_sequence/tomo77_335.tiff b/data/tiff_sequence/tomo77_335.tiff new file mode 100644 index 0000000..bf98c67 Binary files /dev/null and b/data/tiff_sequence/tomo77_335.tiff differ diff --git a/data/tiff_sequence/tomo77_336.tiff b/data/tiff_sequence/tomo77_336.tiff new file mode 100644 index 0000000..51a44ea Binary files /dev/null and b/data/tiff_sequence/tomo77_336.tiff differ diff --git a/data/tiff_sequence/tomo77_337.tiff b/data/tiff_sequence/tomo77_337.tiff new file mode 100644 index 0000000..1db19cc Binary files /dev/null and b/data/tiff_sequence/tomo77_337.tiff differ diff --git a/data/tiff_sequence/tomo77_338.tiff b/data/tiff_sequence/tomo77_338.tiff new file mode 100644 index 0000000..180d623 Binary files /dev/null and b/data/tiff_sequence/tomo77_338.tiff differ diff --git a/data/tiff_sequence/tomo77_339.tiff b/data/tiff_sequence/tomo77_339.tiff new file mode 100644 index 0000000..fd8fb52 Binary files /dev/null and b/data/tiff_sequence/tomo77_339.tiff differ diff --git a/data/tiff_sequence/tomo77_340.tiff b/data/tiff_sequence/tomo77_340.tiff new file mode 100644 index 0000000..1008b6a Binary files /dev/null and b/data/tiff_sequence/tomo77_340.tiff differ diff --git a/data/tiff_sequence/tomo77_341.tiff b/data/tiff_sequence/tomo77_341.tiff new file mode 100644 index 0000000..12711e1 Binary files 
/dev/null and b/data/tiff_sequence/tomo77_341.tiff differ diff --git a/data/tiff_sequence/tomo77_342.tiff b/data/tiff_sequence/tomo77_342.tiff new file mode 100644 index 0000000..c1e22e0 Binary files /dev/null and b/data/tiff_sequence/tomo77_342.tiff differ diff --git a/data/tiff_sequence/tomo77_343.tiff b/data/tiff_sequence/tomo77_343.tiff new file mode 100644 index 0000000..2f141d4 Binary files /dev/null and b/data/tiff_sequence/tomo77_343.tiff differ diff --git a/data/tiff_sequence/tomo77_344.tiff b/data/tiff_sequence/tomo77_344.tiff new file mode 100644 index 0000000..8a31ac8 Binary files /dev/null and b/data/tiff_sequence/tomo77_344.tiff differ diff --git a/data/tiff_sequence/tomo77_345.tiff b/data/tiff_sequence/tomo77_345.tiff new file mode 100644 index 0000000..7913a20 Binary files /dev/null and b/data/tiff_sequence/tomo77_345.tiff differ diff --git a/data/tiff_sequence/tomo77_346.tiff b/data/tiff_sequence/tomo77_346.tiff new file mode 100644 index 0000000..2510bec Binary files /dev/null and b/data/tiff_sequence/tomo77_346.tiff differ diff --git a/data/tiff_sequence/tomo77_347.tiff b/data/tiff_sequence/tomo77_347.tiff new file mode 100644 index 0000000..bcef829 Binary files /dev/null and b/data/tiff_sequence/tomo77_347.tiff differ diff --git a/data/tiff_sequence/tomo77_348.tiff b/data/tiff_sequence/tomo77_348.tiff new file mode 100644 index 0000000..58a454b Binary files /dev/null and b/data/tiff_sequence/tomo77_348.tiff differ diff --git a/data/tiff_sequence/tomo77_349.tiff b/data/tiff_sequence/tomo77_349.tiff new file mode 100644 index 0000000..8f5ee68 Binary files /dev/null and b/data/tiff_sequence/tomo77_349.tiff differ diff --git a/data/tiff_sequence/tomo77_350.tiff b/data/tiff_sequence/tomo77_350.tiff new file mode 100644 index 0000000..d0336f4 Binary files /dev/null and b/data/tiff_sequence/tomo77_350.tiff differ diff --git a/data/tiff_sequence/tomo77_351.tiff b/data/tiff_sequence/tomo77_351.tiff new file mode 100644 index 0000000..cea86e7 Binary files 
/dev/null and b/data/tiff_sequence/tomo77_351.tiff differ diff --git a/data/tiff_sequence/tomo77_352.tiff b/data/tiff_sequence/tomo77_352.tiff new file mode 100644 index 0000000..d78869e Binary files /dev/null and b/data/tiff_sequence/tomo77_352.tiff differ diff --git a/data/tiff_sequence/tomo77_353.tiff b/data/tiff_sequence/tomo77_353.tiff new file mode 100644 index 0000000..f32945c Binary files /dev/null and b/data/tiff_sequence/tomo77_353.tiff differ diff --git a/data/tiff_sequence/tomo77_354.tiff b/data/tiff_sequence/tomo77_354.tiff new file mode 100644 index 0000000..e585980 Binary files /dev/null and b/data/tiff_sequence/tomo77_354.tiff differ diff --git a/data/tiff_sequence/tomo77_355.tiff b/data/tiff_sequence/tomo77_355.tiff new file mode 100644 index 0000000..601980e Binary files /dev/null and b/data/tiff_sequence/tomo77_355.tiff differ diff --git a/data/tiff_sequence/tomo77_356.tiff b/data/tiff_sequence/tomo77_356.tiff new file mode 100644 index 0000000..6f4b14a Binary files /dev/null and b/data/tiff_sequence/tomo77_356.tiff differ diff --git a/data/tiff_sequence/tomo77_357.tiff b/data/tiff_sequence/tomo77_357.tiff new file mode 100644 index 0000000..752c74d Binary files /dev/null and b/data/tiff_sequence/tomo77_357.tiff differ diff --git a/data/tiff_sequence/tomo77_358.tiff b/data/tiff_sequence/tomo77_358.tiff new file mode 100644 index 0000000..e257840 Binary files /dev/null and b/data/tiff_sequence/tomo77_358.tiff differ diff --git a/data/tiff_sequence/tomo77_359.tiff b/data/tiff_sequence/tomo77_359.tiff new file mode 100644 index 0000000..5f3635c Binary files /dev/null and b/data/tiff_sequence/tomo77_359.tiff differ diff --git a/data/tiff_sequence/tomo77_360.tiff b/data/tiff_sequence/tomo77_360.tiff new file mode 100644 index 0000000..b377844 Binary files /dev/null and b/data/tiff_sequence/tomo77_360.tiff differ diff --git a/data/tiff_sequence/tomo77_361.tiff b/data/tiff_sequence/tomo77_361.tiff new file mode 100644 index 0000000..6ebdda7 Binary files 
/dev/null and b/data/tiff_sequence/tomo77_361.tiff differ diff --git a/data/tiff_sequence/tomo77_362.tiff b/data/tiff_sequence/tomo77_362.tiff new file mode 100644 index 0000000..cc7bbf2 Binary files /dev/null and b/data/tiff_sequence/tomo77_362.tiff differ diff --git a/data/tiff_sequence/tomo77_363.tiff b/data/tiff_sequence/tomo77_363.tiff new file mode 100644 index 0000000..a1f5436 Binary files /dev/null and b/data/tiff_sequence/tomo77_363.tiff differ diff --git a/data/tiff_sequence/tomo77_364.tiff b/data/tiff_sequence/tomo77_364.tiff new file mode 100644 index 0000000..cfcac67 Binary files /dev/null and b/data/tiff_sequence/tomo77_364.tiff differ diff --git a/data/tiff_sequence/tomo77_365.tiff b/data/tiff_sequence/tomo77_365.tiff new file mode 100644 index 0000000..380217d Binary files /dev/null and b/data/tiff_sequence/tomo77_365.tiff differ diff --git a/data/tiff_sequence/tomo77_366.tiff b/data/tiff_sequence/tomo77_366.tiff new file mode 100644 index 0000000..7aa907c Binary files /dev/null and b/data/tiff_sequence/tomo77_366.tiff differ diff --git a/data/tiff_sequence/tomo77_367.tiff b/data/tiff_sequence/tomo77_367.tiff new file mode 100644 index 0000000..1633313 Binary files /dev/null and b/data/tiff_sequence/tomo77_367.tiff differ diff --git a/data/tiff_sequence/tomo77_368.tiff b/data/tiff_sequence/tomo77_368.tiff new file mode 100644 index 0000000..beeadd1 Binary files /dev/null and b/data/tiff_sequence/tomo77_368.tiff differ diff --git a/data/tiff_sequence/tomo77_369.tiff b/data/tiff_sequence/tomo77_369.tiff new file mode 100644 index 0000000..1fb8f98 Binary files /dev/null and b/data/tiff_sequence/tomo77_369.tiff differ diff --git a/data/tiff_sequence/tomo77_370.tiff b/data/tiff_sequence/tomo77_370.tiff new file mode 100644 index 0000000..46bad1b Binary files /dev/null and b/data/tiff_sequence/tomo77_370.tiff differ diff --git a/data/tiff_sequence/tomo77_371.tiff b/data/tiff_sequence/tomo77_371.tiff new file mode 100644 index 0000000..7542cce Binary files 
/dev/null and b/data/tiff_sequence/tomo77_371.tiff differ diff --git a/data/tiff_sequence/tomo77_372.tiff b/data/tiff_sequence/tomo77_372.tiff new file mode 100644 index 0000000..77c69e0 Binary files /dev/null and b/data/tiff_sequence/tomo77_372.tiff differ diff --git a/data/tiff_sequence/tomo77_373.tiff b/data/tiff_sequence/tomo77_373.tiff new file mode 100644 index 0000000..acf8833 Binary files /dev/null and b/data/tiff_sequence/tomo77_373.tiff differ diff --git a/data/tiff_sequence/tomo77_374.tiff b/data/tiff_sequence/tomo77_374.tiff new file mode 100644 index 0000000..dfd31f3 Binary files /dev/null and b/data/tiff_sequence/tomo77_374.tiff differ diff --git a/data/tiff_sequence/tomo77_375.tiff b/data/tiff_sequence/tomo77_375.tiff new file mode 100644 index 0000000..f758582 Binary files /dev/null and b/data/tiff_sequence/tomo77_375.tiff differ diff --git a/data/tiff_sequence/tomo77_376.tiff b/data/tiff_sequence/tomo77_376.tiff new file mode 100644 index 0000000..7ef7ab9 Binary files /dev/null and b/data/tiff_sequence/tomo77_376.tiff differ diff --git a/data/tiff_sequence/tomo77_377.tiff b/data/tiff_sequence/tomo77_377.tiff new file mode 100644 index 0000000..5c9885f Binary files /dev/null and b/data/tiff_sequence/tomo77_377.tiff differ diff --git a/data/tiff_sequence/tomo77_378.tiff b/data/tiff_sequence/tomo77_378.tiff new file mode 100644 index 0000000..73b16f4 Binary files /dev/null and b/data/tiff_sequence/tomo77_378.tiff differ diff --git a/data/tiff_sequence/tomo77_379.tiff b/data/tiff_sequence/tomo77_379.tiff new file mode 100644 index 0000000..badf543 Binary files /dev/null and b/data/tiff_sequence/tomo77_379.tiff differ diff --git a/data/tiff_sequence/tomo77_380.tiff b/data/tiff_sequence/tomo77_380.tiff new file mode 100644 index 0000000..9815e65 Binary files /dev/null and b/data/tiff_sequence/tomo77_380.tiff differ diff --git a/data/tiff_sequence/tomo77_381.tiff b/data/tiff_sequence/tomo77_381.tiff new file mode 100644 index 0000000..772dbd7 Binary files 
/dev/null and b/data/tiff_sequence/tomo77_381.tiff differ diff --git a/data/tiff_sequence/tomo77_382.tiff b/data/tiff_sequence/tomo77_382.tiff new file mode 100644 index 0000000..dd5531b Binary files /dev/null and b/data/tiff_sequence/tomo77_382.tiff differ diff --git a/data/tiff_sequence/tomo77_383.tiff b/data/tiff_sequence/tomo77_383.tiff new file mode 100644 index 0000000..9d27cc7 Binary files /dev/null and b/data/tiff_sequence/tomo77_383.tiff differ diff --git a/data/tiff_sequence/tomo77_384.tiff b/data/tiff_sequence/tomo77_384.tiff new file mode 100644 index 0000000..b24f0ea Binary files /dev/null and b/data/tiff_sequence/tomo77_384.tiff differ diff --git a/data/tiff_sequence/tomo77_385.tiff b/data/tiff_sequence/tomo77_385.tiff new file mode 100644 index 0000000..14a1e50 Binary files /dev/null and b/data/tiff_sequence/tomo77_385.tiff differ diff --git a/data/tiff_sequence/tomo77_386.tiff b/data/tiff_sequence/tomo77_386.tiff new file mode 100644 index 0000000..58f021e Binary files /dev/null and b/data/tiff_sequence/tomo77_386.tiff differ diff --git a/data/tiff_sequence/tomo77_387.tiff b/data/tiff_sequence/tomo77_387.tiff new file mode 100644 index 0000000..78ee990 Binary files /dev/null and b/data/tiff_sequence/tomo77_387.tiff differ diff --git a/data/tiff_sequence/tomo77_388.tiff b/data/tiff_sequence/tomo77_388.tiff new file mode 100644 index 0000000..87accfb Binary files /dev/null and b/data/tiff_sequence/tomo77_388.tiff differ diff --git a/data/tiff_sequence/tomo77_389.tiff b/data/tiff_sequence/tomo77_389.tiff new file mode 100644 index 0000000..6e246e2 Binary files /dev/null and b/data/tiff_sequence/tomo77_389.tiff differ diff --git a/data/tiff_sequence/tomo77_390.tiff b/data/tiff_sequence/tomo77_390.tiff new file mode 100644 index 0000000..34ed4f8 Binary files /dev/null and b/data/tiff_sequence/tomo77_390.tiff differ diff --git a/data/tiff_sequence/tomo77_391.tiff b/data/tiff_sequence/tomo77_391.tiff new file mode 100644 index 0000000..35e264d Binary files 
/dev/null and b/data/tiff_sequence/tomo77_391.tiff differ diff --git a/data/tiff_sequence/tomo77_392.tiff b/data/tiff_sequence/tomo77_392.tiff new file mode 100644 index 0000000..4998f00 Binary files /dev/null and b/data/tiff_sequence/tomo77_392.tiff differ diff --git a/data/tiff_sequence/tomo77_393.tiff b/data/tiff_sequence/tomo77_393.tiff new file mode 100644 index 0000000..997a792 Binary files /dev/null and b/data/tiff_sequence/tomo77_393.tiff differ diff --git a/data/tiff_sequence/tomo77_394.tiff b/data/tiff_sequence/tomo77_394.tiff new file mode 100644 index 0000000..855a554 Binary files /dev/null and b/data/tiff_sequence/tomo77_394.tiff differ diff --git a/data/tiff_sequence/tomo77_395.tiff b/data/tiff_sequence/tomo77_395.tiff new file mode 100644 index 0000000..4d94a41 Binary files /dev/null and b/data/tiff_sequence/tomo77_395.tiff differ diff --git a/data/tiff_sequence/tomo77_396.tiff b/data/tiff_sequence/tomo77_396.tiff new file mode 100644 index 0000000..7c3aa1f Binary files /dev/null and b/data/tiff_sequence/tomo77_396.tiff differ diff --git a/data/tiff_sequence/tomo77_397.tiff b/data/tiff_sequence/tomo77_397.tiff new file mode 100644 index 0000000..8b53ee6 Binary files /dev/null and b/data/tiff_sequence/tomo77_397.tiff differ diff --git a/data/tiff_sequence/tomo77_398.tiff b/data/tiff_sequence/tomo77_398.tiff new file mode 100644 index 0000000..e2cfc45 Binary files /dev/null and b/data/tiff_sequence/tomo77_398.tiff differ diff --git a/data/tiff_sequence/tomo77_399.tiff b/data/tiff_sequence/tomo77_399.tiff new file mode 100644 index 0000000..a83ba50 Binary files /dev/null and b/data/tiff_sequence/tomo77_399.tiff differ diff --git a/data/tiff_sequence/tomo77_400.tiff b/data/tiff_sequence/tomo77_400.tiff new file mode 100644 index 0000000..258f306 Binary files /dev/null and b/data/tiff_sequence/tomo77_400.tiff differ diff --git a/data/tiff_sequence/tomo77_401.tiff b/data/tiff_sequence/tomo77_401.tiff new file mode 100644 index 0000000..58b4a78 Binary files 
/dev/null and b/data/tiff_sequence/tomo77_401.tiff differ diff --git a/data/tiff_sequence/tomo77_402.tiff b/data/tiff_sequence/tomo77_402.tiff new file mode 100644 index 0000000..91a9358 Binary files /dev/null and b/data/tiff_sequence/tomo77_402.tiff differ diff --git a/data/tiff_sequence/tomo77_403.tiff b/data/tiff_sequence/tomo77_403.tiff new file mode 100644 index 0000000..fa2e3d7 Binary files /dev/null and b/data/tiff_sequence/tomo77_403.tiff differ diff --git a/data/tiff_sequence/tomo77_404.tiff b/data/tiff_sequence/tomo77_404.tiff new file mode 100644 index 0000000..58f2dc3 Binary files /dev/null and b/data/tiff_sequence/tomo77_404.tiff differ diff --git a/data/tiff_sequence/tomo77_405.tiff b/data/tiff_sequence/tomo77_405.tiff new file mode 100644 index 0000000..3d29199 Binary files /dev/null and b/data/tiff_sequence/tomo77_405.tiff differ diff --git a/data/tiff_sequence/tomo77_406.tiff b/data/tiff_sequence/tomo77_406.tiff new file mode 100644 index 0000000..88647d2 Binary files /dev/null and b/data/tiff_sequence/tomo77_406.tiff differ diff --git a/data/tiff_sequence/tomo77_407.tiff b/data/tiff_sequence/tomo77_407.tiff new file mode 100644 index 0000000..00c52af Binary files /dev/null and b/data/tiff_sequence/tomo77_407.tiff differ diff --git a/data/tiff_sequence/tomo77_408.tiff b/data/tiff_sequence/tomo77_408.tiff new file mode 100644 index 0000000..1357bed Binary files /dev/null and b/data/tiff_sequence/tomo77_408.tiff differ diff --git a/data/tiff_sequence/tomo77_409.tiff b/data/tiff_sequence/tomo77_409.tiff new file mode 100644 index 0000000..af18485 Binary files /dev/null and b/data/tiff_sequence/tomo77_409.tiff differ diff --git a/data/tiff_sequence/tomo77_410.tiff b/data/tiff_sequence/tomo77_410.tiff new file mode 100644 index 0000000..e26791a Binary files /dev/null and b/data/tiff_sequence/tomo77_410.tiff differ diff --git a/data/tiff_sequence/tomo77_411.tiff b/data/tiff_sequence/tomo77_411.tiff new file mode 100644 index 0000000..4a0ee1f Binary files 
/dev/null and b/data/tiff_sequence/tomo77_411.tiff differ diff --git a/data/tiff_sequence/tomo77_412.tiff b/data/tiff_sequence/tomo77_412.tiff new file mode 100644 index 0000000..319053c Binary files /dev/null and b/data/tiff_sequence/tomo77_412.tiff differ diff --git a/data/tiff_sequence/tomo77_413.tiff b/data/tiff_sequence/tomo77_413.tiff new file mode 100644 index 0000000..4c5b60e Binary files /dev/null and b/data/tiff_sequence/tomo77_413.tiff differ diff --git a/data/tiff_sequence/tomo77_414.tiff b/data/tiff_sequence/tomo77_414.tiff new file mode 100644 index 0000000..3b1f5e8 Binary files /dev/null and b/data/tiff_sequence/tomo77_414.tiff differ diff --git a/data/tiff_sequence/tomo77_415.tiff b/data/tiff_sequence/tomo77_415.tiff new file mode 100644 index 0000000..7d91021 Binary files /dev/null and b/data/tiff_sequence/tomo77_415.tiff differ diff --git a/data/tiff_sequence/tomo77_416.tiff b/data/tiff_sequence/tomo77_416.tiff new file mode 100644 index 0000000..334c7cb Binary files /dev/null and b/data/tiff_sequence/tomo77_416.tiff differ diff --git a/data/tiff_sequence/tomo77_417.tiff b/data/tiff_sequence/tomo77_417.tiff new file mode 100644 index 0000000..4732d1b Binary files /dev/null and b/data/tiff_sequence/tomo77_417.tiff differ diff --git a/data/tiff_sequence/tomo77_418.tiff b/data/tiff_sequence/tomo77_418.tiff new file mode 100644 index 0000000..740b7f8 Binary files /dev/null and b/data/tiff_sequence/tomo77_418.tiff differ diff --git a/data/tiff_sequence/tomo77_419.tiff b/data/tiff_sequence/tomo77_419.tiff new file mode 100644 index 0000000..504a40d Binary files /dev/null and b/data/tiff_sequence/tomo77_419.tiff differ diff --git a/data/tiff_sequence/tomo77_420.tiff b/data/tiff_sequence/tomo77_420.tiff new file mode 100644 index 0000000..66bfb1d Binary files /dev/null and b/data/tiff_sequence/tomo77_420.tiff differ diff --git a/data/tiff_sequence/tomo77_421.tiff b/data/tiff_sequence/tomo77_421.tiff new file mode 100644 index 0000000..7794789 Binary files 
/dev/null and b/data/tiff_sequence/tomo77_421.tiff differ diff --git a/data/tiff_sequence/tomo77_422.tiff b/data/tiff_sequence/tomo77_422.tiff new file mode 100644 index 0000000..20abc92 Binary files /dev/null and b/data/tiff_sequence/tomo77_422.tiff differ diff --git a/data/tiff_sequence/tomo77_423.tiff b/data/tiff_sequence/tomo77_423.tiff new file mode 100644 index 0000000..dad42ed Binary files /dev/null and b/data/tiff_sequence/tomo77_423.tiff differ diff --git a/data/tiff_sequence/tomo77_424.tiff b/data/tiff_sequence/tomo77_424.tiff new file mode 100644 index 0000000..12b34ab Binary files /dev/null and b/data/tiff_sequence/tomo77_424.tiff differ diff --git a/data/tiff_sequence/tomo77_425.tiff b/data/tiff_sequence/tomo77_425.tiff new file mode 100644 index 0000000..e1472f1 Binary files /dev/null and b/data/tiff_sequence/tomo77_425.tiff differ diff --git a/data/tiff_sequence/tomo77_426.tiff b/data/tiff_sequence/tomo77_426.tiff new file mode 100644 index 0000000..9405cc7 Binary files /dev/null and b/data/tiff_sequence/tomo77_426.tiff differ diff --git a/data/tiff_sequence/tomo77_427.tiff b/data/tiff_sequence/tomo77_427.tiff new file mode 100644 index 0000000..5f8e1e0 Binary files /dev/null and b/data/tiff_sequence/tomo77_427.tiff differ diff --git a/data/tiff_sequence/tomo77_428.tiff b/data/tiff_sequence/tomo77_428.tiff new file mode 100644 index 0000000..6dc4207 Binary files /dev/null and b/data/tiff_sequence/tomo77_428.tiff differ diff --git a/data/tiff_sequence/tomo77_429.tiff b/data/tiff_sequence/tomo77_429.tiff new file mode 100644 index 0000000..5a2f6d4 Binary files /dev/null and b/data/tiff_sequence/tomo77_429.tiff differ diff --git a/data/tiff_sequence/tomo77_430.tiff b/data/tiff_sequence/tomo77_430.tiff new file mode 100644 index 0000000..3130e94 Binary files /dev/null and b/data/tiff_sequence/tomo77_430.tiff differ diff --git a/data/tiff_sequence/tomo77_431.tiff b/data/tiff_sequence/tomo77_431.tiff new file mode 100644 index 0000000..ddb531d Binary files 
/dev/null and b/data/tiff_sequence/tomo77_431.tiff differ diff --git a/data/tiff_sequence/tomo77_432.tiff b/data/tiff_sequence/tomo77_432.tiff new file mode 100644 index 0000000..af93ac6 Binary files /dev/null and b/data/tiff_sequence/tomo77_432.tiff differ diff --git a/data/tiff_sequence/tomo77_433.tiff b/data/tiff_sequence/tomo77_433.tiff new file mode 100644 index 0000000..3bb12d6 Binary files /dev/null and b/data/tiff_sequence/tomo77_433.tiff differ diff --git a/data/tiff_sequence/tomo77_434.tiff b/data/tiff_sequence/tomo77_434.tiff new file mode 100644 index 0000000..cfffdc8 Binary files /dev/null and b/data/tiff_sequence/tomo77_434.tiff differ diff --git a/data/tiff_sequence/tomo77_435.tiff b/data/tiff_sequence/tomo77_435.tiff new file mode 100644 index 0000000..383cf24 Binary files /dev/null and b/data/tiff_sequence/tomo77_435.tiff differ diff --git a/data/tiff_sequence/tomo77_436.tiff b/data/tiff_sequence/tomo77_436.tiff new file mode 100644 index 0000000..5041935 Binary files /dev/null and b/data/tiff_sequence/tomo77_436.tiff differ diff --git a/data/tiff_sequence/tomo77_437.tiff b/data/tiff_sequence/tomo77_437.tiff new file mode 100644 index 0000000..e7eafc3 Binary files /dev/null and b/data/tiff_sequence/tomo77_437.tiff differ diff --git a/data/tiff_sequence/tomo77_438.tiff b/data/tiff_sequence/tomo77_438.tiff new file mode 100644 index 0000000..8456102 Binary files /dev/null and b/data/tiff_sequence/tomo77_438.tiff differ diff --git a/data/tiff_sequence/tomo77_439.tiff b/data/tiff_sequence/tomo77_439.tiff new file mode 100644 index 0000000..ffe99e2 Binary files /dev/null and b/data/tiff_sequence/tomo77_439.tiff differ diff --git a/data/tiff_sequence/tomo77_440.tiff b/data/tiff_sequence/tomo77_440.tiff new file mode 100644 index 0000000..434eb5b Binary files /dev/null and b/data/tiff_sequence/tomo77_440.tiff differ diff --git a/data/tiff_sequence/tomo77_441.tiff b/data/tiff_sequence/tomo77_441.tiff new file mode 100644 index 0000000..09f304c Binary files 
/dev/null and b/data/tiff_sequence/tomo77_441.tiff differ diff --git a/data/tiff_sequence/tomo77_442.tiff b/data/tiff_sequence/tomo77_442.tiff new file mode 100644 index 0000000..9fa4e9d Binary files /dev/null and b/data/tiff_sequence/tomo77_442.tiff differ diff --git a/data/tiff_sequence/tomo77_443.tiff b/data/tiff_sequence/tomo77_443.tiff new file mode 100644 index 0000000..15d0b68 Binary files /dev/null and b/data/tiff_sequence/tomo77_443.tiff differ diff --git a/data/tiff_sequence/tomo77_444.tiff b/data/tiff_sequence/tomo77_444.tiff new file mode 100644 index 0000000..1d9681c Binary files /dev/null and b/data/tiff_sequence/tomo77_444.tiff differ diff --git a/data/tiff_sequence/tomo77_445.tiff b/data/tiff_sequence/tomo77_445.tiff new file mode 100644 index 0000000..b92746b Binary files /dev/null and b/data/tiff_sequence/tomo77_445.tiff differ diff --git a/data/tiff_sequence/tomo77_446.tiff b/data/tiff_sequence/tomo77_446.tiff new file mode 100644 index 0000000..d9c0921 Binary files /dev/null and b/data/tiff_sequence/tomo77_446.tiff differ diff --git a/data/tiff_sequence/tomo77_447.tiff b/data/tiff_sequence/tomo77_447.tiff new file mode 100644 index 0000000..0204a72 Binary files /dev/null and b/data/tiff_sequence/tomo77_447.tiff differ diff --git a/data/tiff_sequence/tomo77_448.tiff b/data/tiff_sequence/tomo77_448.tiff new file mode 100644 index 0000000..39cda0b Binary files /dev/null and b/data/tiff_sequence/tomo77_448.tiff differ diff --git a/data/tiff_sequence/tomo77_449.tiff b/data/tiff_sequence/tomo77_449.tiff new file mode 100644 index 0000000..c723e26 Binary files /dev/null and b/data/tiff_sequence/tomo77_449.tiff differ diff --git a/data/tiff_sequence/tomo77_450.tiff b/data/tiff_sequence/tomo77_450.tiff new file mode 100644 index 0000000..047df36 Binary files /dev/null and b/data/tiff_sequence/tomo77_450.tiff differ diff --git a/docs/_static/images/frontpage_usage.gif b/docs/_static/images/frontpage_usage.gif deleted file mode 100644 index 
ce3fb23..0000000 Binary files a/docs/_static/images/frontpage_usage.gif and /dev/null differ diff --git a/docs/_static/images/tomopy-favicon.ico b/docs/_static/images/tomopy-favicon.ico new file mode 100644 index 0000000..75c076e Binary files /dev/null and b/docs/_static/images/tomopy-favicon.ico differ diff --git a/docs/_static/videos/alignment.mp4 b/docs/_static/videos/alignment.mp4 new file mode 100644 index 0000000..4d5b84e Binary files /dev/null and b/docs/_static/videos/alignment.mp4 differ diff --git a/docs/_static/videos/center.mp4 b/docs/_static/videos/center.mp4 new file mode 100644 index 0000000..ba88903 Binary files /dev/null and b/docs/_static/videos/center.mp4 differ diff --git a/docs/_static/videos/data_explorer_fast.mp4 b/docs/_static/videos/data_explorer_fast.mp4 new file mode 100644 index 0000000..ab89b86 Binary files /dev/null and b/docs/_static/videos/data_explorer_fast.mp4 differ diff --git a/docs/_static/videos/front_page.mp4 b/docs/_static/videos/front_page.mp4 new file mode 100644 index 0000000..28400b6 Binary files /dev/null and b/docs/_static/videos/front_page.mp4 differ diff --git a/docs/_static/videos/import.mp4 b/docs/_static/videos/import.mp4 new file mode 100644 index 0000000..bac7371 Binary files /dev/null and b/docs/_static/videos/import.mp4 differ diff --git a/docs/_static/videos/intro.mp4 b/docs/_static/videos/intro.mp4 new file mode 100644 index 0000000..20112ac Binary files /dev/null and b/docs/_static/videos/intro.mp4 differ diff --git a/docs/_static/videos/looking_at_data_fast.mp4 b/docs/_static/videos/looking_at_data_fast.mp4 new file mode 100644 index 0000000..4b92ccd Binary files /dev/null and b/docs/_static/videos/looking_at_data_fast.mp4 differ diff --git a/docs/_static/videos/subdirs.mp4 b/docs/_static/videos/subdirs.mp4 new file mode 100644 index 0000000..0493cf4 Binary files /dev/null and b/docs/_static/videos/subdirs.mp4 differ diff --git a/docs/conf.py b/docs/conf.py index d4860f0..8816a7d 100644 --- a/docs/conf.py 
+++ b/docs/conf.py @@ -17,6 +17,8 @@ import subprocess import sys +# import tomopyui + try: from tomopyui import __version__ as release except ImportError: @@ -26,10 +28,14 @@ # -- Project information ----------------------------------------------------- project = "tomopyui" -copyright = "2021, Samuel Scott Welborn" +copyright = "2022, Samuel Scott Welborn" author = "Samuel Scott Welborn" - +sys.path.insert(0, os.path.abspath("../tomopyui")) +sys.path.insert(0, os.path.abspath("../tomopyui/widgets/")) +sys.path.insert(0, os.path.abspath("../tomopyui/backend/")) +sys.path.insert(0, os.path.abspath("../tomopyui/tomocupy/")) +sys.path.insert(0, os.path.abspath(".")) # -- Generate API ------------------------------------------------------------ api_folder_name = "api" shutil.rmtree(api_folder_name, ignore_errors=True) # in case of new or renamed modules @@ -42,8 +48,19 @@ "--no-toc", "--templatedir _templates", "--separate", + "--implicit-namespaces", "../tomopyui/", # excluded modules + "../tomopyui/*/main.py", + "../*/main.py", + "../tomopyui/*backend*", + "../tomopyui/*tomocupy*" + # "../*/fft_cucim.py" + # "../*/backend/" + # "../*/ipyplot.py", + # "../*/mpl_kwargs.py", + # "../*/xarray_helpers.py", + # "../*/tests", ] ), shell=True, @@ -77,7 +94,7 @@ "show-inheritance": True, "undoc-members": True, } -autodoc_mock_imports = ["cupyx", "cupy", "tqdm"] +autodoc_mock_imports = ["tqdm", "cupy", "cupyx"] add_module_names = False napoleon_google_docstring = False napoleon_include_private_with_doc = False diff --git a/docs/contributing.md b/docs/contributing.md index b86fab0..0d5ec46 100644 --- a/docs/contributing.md +++ b/docs/contributing.md @@ -21,8 +21,8 @@ If you are working in a Jupyter Notebook, then in order to see your code changes %matplotlib ipympl import tomopyui.widgets.main as main - dashboard, file_import, center, prep, align, recon = main.create_dashboard() - dashboard + dashboard_output, dashboard, file_import, prep, center, align, recon, dataexplorer = 
main.create_dashboard("APS") # can be "SSRL_62C", "ALS_832", "APS" + dashboard_output ``` ### Working with Git diff --git a/docs/environment-testing-docs.yml b/docs/environment-testing-docs.yml deleted file mode 100644 index c9e17b8..0000000 --- a/docs/environment-testing-docs.yml +++ /dev/null @@ -1,18 +0,0 @@ -name: tomopyui-testing-docs -channels: - - defaults -dependencies: - - python=3.9 - - tomopy - - dxchange - - jupyterlab - - astropy - - ipyfilechooser - - ipywidgets=8 - - widgetsnbextension - - jupyterlab_widgets - - ca-certificates - - certifi - - openssl - - ipympl -prefix: C:\Users\Sam\anaconda3\envs\tomopyui-testing-docs diff --git a/docs/environment.yml b/docs/environment.yml deleted file mode 100644 index 56d2260..0000000 --- a/docs/environment.yml +++ /dev/null @@ -1,37 +0,0 @@ -name: tomopyui-docs -channels: - - conda-forge/label/widgetsnbextension_dev - - conda-forge/label/jupyterlab_widgets_rc - - conda-forge/label/ipywidgets_dev - - astra-toolbox - - conda-forge - - defaults -dependencies: - - python=3.9 - - pip - - tomopy - - dxchange - - jupyterlab - - astropy - - ipyfilechooser - - ipywidgets=8 - - widgetsnbextension - - jupyterlab_widgets - - ca-certificates - - certifi - - openssl - - astra-toolbox - - joblib - - ipympl [--no-deps] - - sphinx - - jupyter_sphinx - - numpydoc - - sphinx-copybutton - - sphinx-panels - - sphinx-thebe - - sphinx-togglebutton - - sphinx-book-theme - - sphinx-gallery - - myst-nb [--no-deps] - - pip: - - mpl-interactions \ No newline at end of file diff --git a/docs/examples.md b/docs/examples.md index f192b00..64b0e7e 100644 --- a/docs/examples.md +++ b/docs/examples.md @@ -1,3 +1,76 @@ -# Examples +# Tutorial +:::{note} -## Working on this page... \ No newline at end of file +This is a tutorial as of v0.0.4. 
+ +::: +## Opening Jupyter Lab + +```{jupyter-execute} +:hide-code: + +from ipywidgets import Video, HBox, Layout +Video.from_file("_static/videos/intro.mp4", autoplay=False) +``` + +## General Tutorial (latest) +```{jupyter-execute} +:hide-code: + +# add these options if video looks weird: width=600, height=300 +from IPython.display import YouTubeVideo +YouTubeVideo('O2RJCL4x4JE') +``` + +## Importing Data + +```{jupyter-execute} +:hide-code: + +from ipywidgets import Video, HBox, Layout +Video.from_file("_static/videos/import.mp4", autoplay=False) +``` + +Once you import your data, you can check it out and save a movie of it: +```{jupyter-execute} +:hide-code: + +from ipywidgets import Video, HBox, Layout +Video.from_file("_static/videos/looking_at_data_fast.mp4", autoplay=False) +``` + +## Finding Center of Rotation + +```{jupyter-execute} +:hide-code: + +from ipywidgets import Video, HBox, Layout +Video.from_file("_static/videos/center.mp4", autoplay=False) +``` + +## Alignment (with CUDA) + +```{jupyter-execute} +:hide-code: + +from ipywidgets import Video, HBox, Layout +Video.from_file("_static/videos/alignment.mp4", autoplay=False) +``` + +This alignment creates subfolders with metadata: + +```{jupyter-execute} +:hide-code: + +from ipywidgets import Video, HBox, Layout +Video.from_file("_static/videos/subdirs.mp4", autoplay=False) +``` + +## Exploring aligned data + +```{jupyter-execute} +:hide-code: + +from ipywidgets import Video, HBox, Layout +Video.from_file("_static/videos/data_explorer_fast.mp4", autoplay=False) +``` \ No newline at end of file diff --git a/docs/howitworks.md b/docs/howitworks.md index 03ed6b9..8110dc6 100644 --- a/docs/howitworks.md +++ b/docs/howitworks.md @@ -20,68 +20,50 @@ As mentioned on the [home page](https://tomopyui.readthedocs.io/en/latest/), `to - [Documentation](https://ipywidgets.readthedocs.io/en/latest/) - The user interface is created inside of 
[JupyterLab](https://jupyterlab.readthedocs.io/en/stable/getting_started/overview.html) using ipywidgets. -**mpl-interactions** -- [Documentation](https://mpl-interactions.readthedocs.io/en/stable/index.html) -- Package which improves some of the interactivity of `matplotlib` widgets. -- The documentation looks very similar to the one you see here. Big thanks to [@ianhi](https://github.com/ianhi) for developing `mpl-interactions` and [@redeboer](https://github.com/redeboer) for developing those docs. - **cupy** - [Documentation](https://docs.cupy.dev/en/stable/overview.html) - Sends [numpy](https://numpy.org/doc/1.21/) arrays to the GPU for calculations +**bqplot** +- [Documentation](https://bqplot.readthedocs.io/en/latest/) + +**bqplot-image-gl** +- [pip](https://pypi.org/project/bqplot-image-gl/) + **tomocupy** - [Documentation TK](https://en.wikipedia.org/wiki/To_come_(publishing)) -- Utilizes power of GPU-acceleration to speed up automatic alignment +- Utilizes GPU-acceleration to speed up automatic alignment - Built from [tomopy](https://tomopy.readthedocs.io/en/latest/), but sends data to the GPU to do some of the calculations. - Only a few functions available now, but could be expanded significantly - Included as a module in tomopyui (for now) - Helps run a lot of the backend of tomopyui +## Structure + +The code is divided into the frontend (`tomopyui.widgets`) and the backend (`tomopyui.backend` and `tomopyui.tomocupy`). -## Code structure +When the user calls `tomopyui.widgets.main.create_dashboard`, they are creating several different objects: -The code is divided into the frontend (currently {doc}`api/tomopyui.widgets`) and the backend (currently {doc}`api/tomopyui.backend` and {doc}`api/tomopyui.tomocupy`). This could be divided differently/renamed later, but it is a starting point. +1. 
`Import` tab: + - The type of `Import` tab is dictated by the user's choice in string on the call from the Jupyter notebook (e.g., "SSRL_62C" or "ALS_832") + - Holds both raw and prenormalized `Uploader`s, which hold a specific type of `RawProjections` (depending on the choice above) and a general `Prenormalized_Projections` object, respectively. + - `Uploader`s upload the data (hold buttons, viewers, etc.), and `Projection`s hold the data. + - `Projection` objects also hold specific types of metadata. Storing metadata along the processing pipeline is critical. -### Frontend +2. `Center` tab: + - Used to interactively find the center of rotation + - Contains two [Accordion](https://ipywidgets.readthedocs.io/en/latest/examples/Widget%20List.html#Accordion) widgets: + 1. [`tomopy.recon.rotation.find_center`](https://tomopy.readthedocs.io/en/latest/api/tomopy.recon.rotation.html) and [`tomopy.recon.rotation.find_center_vo`](https://tomopy.readthedocs.io/en/latest/api/tomopy.recon.rotation.html) for automatic center finding. + 2. Reconstruction of a given slice of the data at various centers. The user can then maneuver the slider to find the best center by eye. -When the user calls `tomopyui.widgets.main.create_dashboard`, they are creating several different objects: +3. `Align` tab: + - Used to align jittery data using the automatic alignment algorithms built into tomopy. + - Align only a part of a dataset by selecting an ROI in the "Imported Projections" viewer. + +4. `Recon` tab: + - Used to reconstruct data using one of many algorithms available from TomoPy and Astra Toolbox. + +5. `Data Explorer` tab: + - Used to look at prior alignments. -{class}`~tomopyui.widgets.meta.Import` -- Helps import data. -- Creates the first tab of the dashboard, which contains: - 1. [ipyfilechooser](https://github.com/crahan/ipyfilechooser) widget for choosing a file. - 2. 
[FloatText](https://ipywidgets.readthedocs.io/en/latest/examples/Widget%20List.html#FloatText) widgets for start and end angle. - 3. [IntText](https://ipywidgets.readthedocs.io/en/latest/examples/Widget%20List.html#IntText) widget for number of angles. This currently does not do anything: the file import automatically grabs the number of angles. -- It is a starting point to many of the other classes - - Other classes use file path information from {class}`~tomopyui.widgets.meta.Import` to import a {class}`~tomopyui.backend.tomodata.TomoData` object and run the algorithms on the backend. - -{class}`~tomopyui.widgets.meta.Plotter` -- Helps plot data -- Uses [hyperslicer](https://mpl-interactions.readthedocs.io/en/stable/examples/hyperslicer.html) and [histogram](https://mpl-interactions.readthedocs.io/en/stable/examples/hist.html) to make this interactive in Jupyter. - -{class}`~tomopyui.widgets.meta.Center` -- Creates Center tab in the dashboard -- Contains two [Accordion](https://ipywidgets.readthedocs.io/en/latest/examples/Widget%20List.html#Accordion) widgets: - 1. Button clicks use [`tomopy.recon.rotation.find_center`](https://tomopy.readthedocs.io/en/latest/api/tomopy.recon.rotation.html) and [`tomopy.recon.rotation.find_center_vo`](https://tomopy.readthedocs.io/en/latest/api/tomopy.recon.rotation.html) for automatic center finding. - 2. Button clicks use {doc}`api/tomopyui.backend.util.center`.write_center, which is a copy of [`tomopy.recon.rotation.write_center`](https://tomopy.readthedocs.io/en/latest/api/tomopy.recon.rotation.html) to reconstruct a given slice of the data at various centers. The user can then maneuver the slider to find the best center by eye. - -{class}`~tomopyui.widgets.meta.Align` -- Creates Align tab in the dashboard -- Contains a [RadioButton](https://ipywidgets.readthedocs.io/en/latest/examples/Widget%20List.html#RadioButtons) widgets to activate alignment, or to use a full or partial dataset. 
-- Contains several [Accordion](https://ipywidgets.readthedocs.io/en/latest/examples/Widget%20List.html#Accordion) widgets: - 1. Plot Projection Images: - - Uses {class}`~tomopyui.widgets.meta.Plotter` class to plot projection images before alignment. One can click the "set projection range" button after selecting a window (see [this gif](./index.md#usage)), and it will set the projection range to the current range on the hyperslicer plot. - - Can save the animation as an mp4 using a button. [TK](https://en.wikipedia.org/wiki/To_come_(publishing)) - 2. Methods: - - Selection of various CUDA-based reconstruction algorithms for the alignment. Selecting more than one of these will perform the alignment multiple times (must be fixed in backend, first). - 3. Save Options: - - Various save options, documented in {class}`~tomopyui.widgets.meta.Align` - 4. Alignment options: - - Various alignment options, documented in {class}`~tomopyui.widgets.meta.Align` - -{class}`~tomopyui.widgets.meta.Recon` -- Subclass of {class}`~tomopyui.widgets.meta.Align`. Variations include what metadata to set, and some buttons/checkboxes. - -### Backend - -[Documentation TK](https://en.wikipedia.org/wiki/To_come_(publishing)) \ No newline at end of file +All of these objects talk to `view.py`, which holds the image viewers. 
\ No newline at end of file diff --git a/docs/index.md b/docs/index.md index 8066bdd..067eb18 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,4 +1,11 @@ -# tomopyui +# TomoPyUI + +```{jupyter-execute} +:hide-code: + +from ipywidgets import Video, HBox, Layout +Video.from_file("_static/videos/alignment.mp4", autoplay=False) +``` Have you ever wondered to yourself one of the following: @@ -6,14 +13,16 @@ Have you ever wondered to yourself one of the following: - "I really wish I knew what was going on during automatic tomography data alignment, and that it wasn't just a black box filled with math that gives me a bad result" - "I really don't want to open another image stack in ImageJ" -`tomopyui` aims to provide a solution to these problems. Built on [tomopy](https://tomopy.readthedocs.io/en/latest/), [astra-toolbox](http://www.astra-toolbox.com/docs/install.html), [ipywidgets](https://ipywidgets.readthedocs.io/en/latest/), and [mpl-interactions](https://mpl-interactions.readthedocs.io/en/stable/index.html), `tomopyui` is a graphical user interface (GUI) that will allow you to: +`tomopyui` aims to provide a solution to these problems. 
Built on [tomopy](https://tomopy.readthedocs.io/en/latest/), [astra-toolbox](http://www.astra-toolbox.com/docs/install.html), [ipywidgets](https://ipywidgets.readthedocs.io/en/latest/), [bqplot](), and [bqplot-image-gl](https://pypi.org/project/bqplot-image-gl/) `tomopyui` is a highly interactive and reactive graphical user interface (GUI) that will allow you to: -- Import tomography data +- Import tomography data from various sources - raw beamline data (projections, references, and dark fields), or prenormalized data from another program (i.e., [TXM Wizard](https://sourceforge.net/projects/txm-wizard/)) - Find the data's center of rotation (manually, or automatically from [tomopy](https://tomopy.readthedocs.io/en/latest/) 's centering algorithms) - Iteratively align your data using [joint iterative reconstruction and reprojection](https://www.nature.com/articles/s41598-017-12141-9.pdf) and inspect the convergence at each iteration. - Look at your normalized/aligned/reconstructed data in the app, rather than pulling it up in ImageJ -- Try out all the reconstruction algorithms in a few clicks. Run them. Come back to folders filled with reconstructed data using all those algorithms. Some are better than others, and some are faster than others. -- Process a dataset quickly to find standard values, save alignment and reconstruction metadata in JSON files for batch reconstruction later on. (still in development progress) +- Try out all the reconstruction algorithms in a few clicks. Run them. Come back to folders filled with reconstructed data using all those algorithms. Some are better than others. +- Process a dataset quickly to find standard values, save alignment and reconstruction metadata in JSON files for batch reconstruction later on. + +At each part of this process, metadata about your data is saved so that you know what you did when you come back to it in a month or two or three or seven. 
This application was developed at the Stanford Synchrotron Radiation Lightsource ([SSRL](https://www-ssrl.slac.stanford.edu/)) to aid in our alignment and reconstruction processes. It could certainly use _your_ help! See the {doc}`contributing` page for more information on how you can get involved. @@ -22,24 +31,15 @@ This application was developed at the Stanford Synchrotron Radiation Lightsource Open up [JupyterLab](https://jupyterlab.readthedocs.io/en/stable/getting_started/overview.html) after [installing](#install), and run the following in the first cell: ```{jupyter-execute} -%matplotlib ipympl import tomopyui.widgets.main as main -dashboard, file_import, center, prep, align, recon = main.create_dashboard() -dashboard +dashboard_output, dashboard, file_import, prep, center, align, recon, dataexplorer = main.create_dashboard("SSRL_62C") # can be "SSRL_62C", "ALS_832", or "APS" +dashboard_output ``` -You can click through the tabs that you will see on the dashboard to check out the options available. Most of the features cannot be used on this webpage, but it gives you a flavor of what comes with the app. - -Here is what it will look like when you open it on your computer: - -```{image} _static/images/frontpage_usage.gif - -``` +You can click through the tabs that you will see on the dashboard to check out the options available. Most of the features cannot be used without a Jupyter environment, but it gives you a flavor of what comes with the app. -There will be more examples of usage [TK](https://en.wikipedia.org/wiki/To_come_(publishing)) in the {doc}`examples` page. - -This shows you how to upload your data and check it out using the [mpl-interactions](https://mpl-interactions.readthedocs.io/en/stable/index.html) interactive [hyperslicer](https://mpl-interactions.readthedocs.io/en/stable/examples/hyperslicer.html) and [histogram](https://mpl-interactions.readthedocs.io/en/stable/examples/hist.html) widgets. 
+TomoPyUI is under development, and new features are being added with each commit. Video tutorials on usage can be found under the {doc}`examples` page -- the most recent walkthrough of its functionality is there, where SSRL 6-2C users from both [NREL](https://www.nrel.gov/) and [UCLA](https://www.chem.ucla.edu/scalar/) joined the TomoPyUI team for a tutorial. ## Install @@ -49,11 +49,6 @@ If you are new to installing conda/pip packages, and/or you do not currently hav ::: -:::{note} - -At the moment, this package only supports [tomopy](https://tomopy.readthedocs.io/en/latest/) and [astra-toolbox](http://www.astra-toolbox.com/docs/install.html) installed with CUDA. In the future, we will relax this requirement. - -::: ### Installing with CUDA Once you are finished installing CUDA, navigate to the directory in which you would like to install tomopyui (use cd in the anaconda prompt to navigate): @@ -92,30 +87,27 @@ Once you do that, you should see (tomopyui) ``` -instead of (base) in your anaconda prompt. Finally, your last step is to install tomopyui. From the main directory (the one that has setup.py in it), run: +instead of (base) in your anaconda prompt. Finally, your last step is to install TomoPyUI. From the main directory (the one that has setup.py in it), run: ``` -pip install . +pip install -e . ``` ### Installing without CUDA -Without CUDA, this program is useless for aligning/reconstructing tomography data. - -If you don't have CUDA and you just want to check out the ipywidgets, you can still do that using the environment.yml in the docs folder: +Without CUDA, you will miss out on some of the features in TomoPyUI. You can still install it by doing the following ``` cd tomopyui -cd docs -conda env create -f environment.yml +conda env create -f environment-nocuda.yml ``` Then, activate the environment and install: ``` -conda activate tomopyui-docs +conda activate tomopyui cd .. -pip install . +pip install -e . 
``` _Follow the links below for in-depth installation page and API._ diff --git a/docs/install.md b/docs/install.md index 73f95db..e5a7586 100644 --- a/docs/install.md +++ b/docs/install.md @@ -1,20 +1,14 @@ # Installation -:::{note} - -At the moment, this package only supports [tomopy](https://tomopy.readthedocs.io/en/latest/) and [astra-toolbox](http://www.astra-toolbox.com/docs/install.html) installed with CUDA. In the future, we will relax this requirement. - -::: - ## Prerequisites -You will have to have the following set of hardware/software to run all of the current features of tomopyui: +You will have to have the following set of hardware/software to run all of the features of TomoPyUI: - anaconda (to create a python environment) - NVIDIA graphics card capable of CUDA 10.2+ - CUDA 10.2+ -If you just want to check out the UI, there is also a way to run that if you do not have a CUDA-enabled graphics card. See [this section](#installing-tomopyui-without-cuda). +You can also install without cuda if you don't have a CUDA-enabled graphics card. See [this section](#installing-tomopyui-without-cuda). Below are the installation instructions given you do not have any of this installed on your computer. @@ -42,7 +36,7 @@ This installation can be very confusing, and I hope to not confuse you further w :::{note} -I have only tested this on Windows machines. If someone would like to write up a "for Dummys" install instructions for Linux or Mac, be my guest. +I have only tested this on Windows machines. If someone would like to write up install instructions for Linux or Mac, be my guest. ::: @@ -101,9 +95,9 @@ Retry: to see if your computer can recognize the nvcc command. 
-## Installing tomopyui +## Installing TomoPyUI -First, navigate to where you want to install tomopyui: +First, navigate to where you want to install TomoPyUI: ``` cd your-install-directory-name @@ -114,8 +108,13 @@ Clone the github repository: ``` git clone https://github.com/samwelborn/tomopyui.git ``` +:::{note} + +If you don't want to download the entire repository, you can just download the environment.yml file [here](https://github.com/samwelborn/tomopyui/blob/main/environment.yml) for CUDA or [here](https://github.com/samwelborn/tomopyui/blob/main/environment-nocuda.yml) for non-CUDA. Along with setting up your environment, this should install the latest stable release of tomopyui from [PyPI](https://pypi.org/project/tomopyui/). -Navigate on into the tomopyui directory: +::: + +Navigate on into the TomoPyUI directory: ``` cd tomopyui @@ -133,33 +132,32 @@ This will install a new environment called tomopyui. To activate this environmen conda activate tomopyui ``` -Once you do that, you should see (tomopyui) instead of (base) in your anaconda prompt. Your last step is to install tomopyui. From the main directory (the one that has setup.py in it), run: +Once you do that, you should see (tomopyui) instead of (base) in your anaconda prompt. This should have installed the latest release of tomopyui from PyPI. If you want to install the latest version from the master branch, you can run: ``` pip install . ``` -## Installing tomopyui without CUDA +in the tomopyui directory (the one with setup.py). -Without CUDA, this program is useless for aligning/reconstructing tomography data. 
+## Installing TomoPyUI without CUDA -If you don't have CUDA and just want to check out the ipywidgets, you can still do that using the environment.yml in the docs folder: +If you don't have CUDA and just want to check out the ipywidgets, you can still do that using the [environment-nocuda.yml](https://github.com/samwelborn/tomopyui/blob/main/environment-nocuda.yml) file: ``` cd tomopyui -cd docs -conda env create -f environment.yml +conda env create -f environment-nocuda.yml ``` Then, activate the environment: ``` -conda activate tomopyui-docs +conda activate tomopyui-nocuda ``` -## Installing tomopyui for development +## Installing TomoPyUI for development -First create your own fork of . If you are familiar with command-line git, you can do it that way. Otherwise, download [GitHub Desktop](https://desktop.github.com/) and download the tomopyui repository from there. Follow the install instructions above, then run: +First create your own fork of . If you are familiar with command-line git, you can do it that way. Otherwise, download [GitHub Desktop](https://desktop.github.com/) and download the TomoPyUI repository from there. Follow the install instructions above, then run: ``` pip install -e . 
diff --git a/environment-nocuda.yml b/environment-nocuda.yml index 8b885ea..27deff2 100644 --- a/environment-nocuda.yml +++ b/environment-nocuda.yml @@ -8,6 +8,12 @@ channels: - defaults dependencies: - python=3.9 + - matplotlib + - ffmpeg + - dask + - dask-image + - olefile + - git - pip - tomopy - dxchange @@ -20,6 +26,7 @@ dependencies: - ca-certificates - certifi - openssl + - astra-toolbox - joblib - ipympl [--no-deps] - bqplot @@ -34,5 +41,5 @@ dependencies: - sphinx-gallery - myst-nb [--no-deps] - pip: - - mpl-interactions - - bqplot-image-gl \ No newline at end of file + - bqplot-image-gl + - tomopyui \ No newline at end of file diff --git a/environment.yml b/environment.yml index aba045c..a4d3569 100644 --- a/environment.yml +++ b/environment.yml @@ -1,13 +1,19 @@ name: tomopyui channels: - - conda-forge/label/widgetsnbextension_dev + - conda-forge/label/widgetsnbextension_rc - conda-forge/label/jupyterlab_widgets_rc - - conda-forge/label/ipywidgets_dev + - conda-forge/label/ipywidgets_rc - astra-toolbox - conda-forge - defaults dependencies: - python=3.9 + - matplotlib + - ffmpeg + - dask + - dask-image + - olefile + - git - pip - tomopy - dxchange @@ -37,5 +43,5 @@ dependencies: - sphinx-gallery - myst-nb [--no-deps] - pip: - - mpl-interactions - - bqplot-image-gl \ No newline at end of file + - bqplot-image-gl + - tomopyui \ No newline at end of file diff --git a/testing-fileexplorer.ipynb b/testing-fileexplorer.ipynb deleted file mode 100644 index bb7f625..0000000 --- a/testing-fileexplorer.ipynb +++ /dev/null @@ -1,279 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "17397d77-1bf5-45cc-bdf3-485ac71ae249", - "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "43c37f82b115484c84b573105dc44190", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Tab(children=(VBox(children=(HBox(children=(FileChooser(path='C:\\Users\\samwe\\Documents\\tomopyui', 
filename='',…" - ] - }, - "execution_count": 1, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "%reload_ext autoreload\n", - "%autoreload 2\n", - "%matplotlib ipympl\n", - "import tomopyui.widgets.main as main\n", - "\n", - "dashboard, file_import, center, prep, align, recon = main.create_dashboard()\n", - "dashboard" - ] - }, - { - "cell_type": "code", - "execution_count": 681, - "id": "121051c7-7d7f-4510-9a78-1c577a8fd14c", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 681, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from tomopyui.backend.tomorecon import TomoRecon\n", - "TomoRecon(recon)" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "3bd03248-bd42-4489-8e7a-cadc1f4b8dde", - "metadata": {}, - "outputs": [], - "source": [ - "from tomopyui.widgets.meta import DataExplorer\n", - "image_metadata = {\"titles\": [\"Before Alignment\",\"After Alignment\"],\n", - " \"linked_stacks\": False\n", - " }\n" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "0d7311b2-bb4d-4a6a-a169-996dbc4c6ca5", - "metadata": {}, - "outputs": [], - "source": [ - "from tomopyui.backend.tomodata import TomoData\n", - "\n", - "a = DataExplorer(file_import, image_metadata, align)" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "32ba5530-6b1c-48c1-ac6c-669bf8c80d8d", - "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "2ab2092b2ebc4d6e95cb1b00dddadbfc", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "VBox(children=(VBox(children=(HBox(children=(VBox(children=(Label(value='Original Data', layout=Layout(justify…" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "a._create_plotter_filebrowser()\n", - "display(a.data_plotter)" - ] - }, - { - "cell_type": "code", - "execution_count": 439, - "id": 
"ac2cb67c-34af-4f9d-a1b4-0abd0e4f1d24", - "metadata": {}, - "outputs": [ - { - "ename": "TraitError", - "evalue": "The 'description' trait of a Checkbox instance expected a unicode string, not the PureWindowsPath PureWindowsPath('C:/Users/Sam/Documents/tomopyui/tomopyui/data/20220113-1000-alignment/20220113-1001-sirt/conv.npy').", - "output_type": "error", - "traceback": [ - "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[1;31mTraitError\u001b[0m Traceback (most recent call last)", - "\u001b[1;32m~\\AppData\\Local\\Temp/ipykernel_34256/2460162098.py\u001b[0m in \u001b[0;36m\u001b[1;34m\u001b[0m\n\u001b[0;32m 16\u001b[0m \u001b[0mexperiments\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m{\u001b[0m\u001b[1;33m}\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 17\u001b[0m \u001b[0moptions_widget\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mwidgets\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mVBox\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mlayout\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;33m{\u001b[0m\u001b[1;34m'overflow'\u001b[0m\u001b[1;33m:\u001b[0m \u001b[1;34m'auto'\u001b[0m\u001b[1;33m}\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 18\u001b[1;33m \u001b[0mdefault_options\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[0mwidgets\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mCheckbox\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdescription\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0meachfilename\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mFalse\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0meachfilename\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mAllfileslist\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m-\u001b[0m\u001b[1;36m10\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 19\u001b[0m 
\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 20\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mwhentextischanged\u001b[0m \u001b[1;33m(\u001b[0m\u001b[0mchange\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;32m~\\AppData\\Local\\Temp/ipykernel_34256/2460162098.py\u001b[0m in \u001b[0;36m\u001b[1;34m(.0)\u001b[0m\n\u001b[0;32m 16\u001b[0m \u001b[0mexperiments\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m{\u001b[0m\u001b[1;33m}\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 17\u001b[0m \u001b[0moptions_widget\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mwidgets\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mVBox\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mlayout\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;33m{\u001b[0m\u001b[1;34m'overflow'\u001b[0m\u001b[1;33m:\u001b[0m \u001b[1;34m'auto'\u001b[0m\u001b[1;33m}\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 18\u001b[1;33m \u001b[0mdefault_options\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[0mwidgets\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mCheckbox\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdescription\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0meachfilename\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mFalse\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0meachfilename\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mAllfileslist\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m-\u001b[0m\u001b[1;36m10\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 19\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 20\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mwhentextischanged\u001b[0m \u001b[1;33m(\u001b[0m\u001b[0mchange\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", - 
"\u001b[1;32m~\\anaconda3\\envs\\tomopyui-nocuda\\lib\\site-packages\\ipywidgets\\widgets\\widget_bool.py\u001b[0m in \u001b[0;36m__init__\u001b[1;34m(self, value, **kwargs)\u001b[0m\n\u001b[0;32m 43\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mvalue\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 44\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'value'\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mvalue\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 45\u001b[1;33m \u001b[0msuper\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__init__\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 46\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 47\u001b[0m \u001b[0m_model_name\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mUnicode\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'BoolModel'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtag\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0msync\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mTrue\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;32m~\\anaconda3\\envs\\tomopyui-nocuda\\lib\\site-packages\\ipywidgets\\widgets\\widget.py\u001b[0m in \u001b[0;36m__init__\u001b[1;34m(self, **kwargs)\u001b[0m\n\u001b[0;32m 427\u001b[0m \u001b[1;34m\"\"\"Public constructor\"\"\"\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 428\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_model_id\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpop\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'model_id'\u001b[0m\u001b[1;33m,\u001b[0m 
\u001b[1;32mNone\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 429\u001b[1;33m \u001b[0msuper\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__init__\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 430\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 431\u001b[0m \u001b[0mWidget\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_call_widget_constructed\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;32m~\\anaconda3\\envs\\tomopyui-nocuda\\lib\\site-packages\\traitlets\\traitlets.py\u001b[0m in \u001b[0;36m__init__\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m 1077\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mkey\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mvalue\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mitems\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1078\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mhas_trait\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mkey\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1079\u001b[1;33m \u001b[0msetattr\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mkey\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 1080\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1081\u001b[0m \u001b[1;31m# passthrough args that don't set traits to 
super\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;32m~\\anaconda3\\envs\\tomopyui-nocuda\\lib\\site-packages\\traitlets\\traitlets.py\u001b[0m in \u001b[0;36m__set__\u001b[1;34m(self, obj, value)\u001b[0m\n\u001b[0;32m 604\u001b[0m \u001b[1;32mraise\u001b[0m \u001b[0mTraitError\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'The \"%s\" trait is read-only.'\u001b[0m \u001b[1;33m%\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mname\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 605\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 606\u001b[1;33m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mset\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mobj\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 607\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 608\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0m_validate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mobj\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;32m~\\anaconda3\\envs\\tomopyui-nocuda\\lib\\site-packages\\traitlets\\traitlets.py\u001b[0m in \u001b[0;36mset\u001b[1;34m(self, obj, value)\u001b[0m\n\u001b[0;32m 578\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 579\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mset\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mobj\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 580\u001b[1;33m \u001b[0mnew_value\u001b[0m \u001b[1;33m=\u001b[0m 
\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_validate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mobj\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 581\u001b[0m \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 582\u001b[0m \u001b[0mold_value\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mobj\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_trait_values\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mname\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;32m~\\anaconda3\\envs\\tomopyui-nocuda\\lib\\site-packages\\traitlets\\traitlets.py\u001b[0m in \u001b[0;36m_validate\u001b[1;34m(self, obj, value)\u001b[0m\n\u001b[0;32m 610\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mvalue\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 611\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mhasattr\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'validate'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 612\u001b[1;33m \u001b[0mvalue\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mvalidate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mobj\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 613\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mobj\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_cross_validation_lock\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mFalse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 614\u001b[0m \u001b[0mvalue\u001b[0m \u001b[1;33m=\u001b[0m 
\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_cross_validate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mobj\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;32m~\\anaconda3\\envs\\tomopyui-nocuda\\lib\\site-packages\\traitlets\\traitlets.py\u001b[0m in \u001b[0;36mvalidate\u001b[1;34m(self, obj, value)\u001b[0m\n\u001b[0;32m 2188\u001b[0m \u001b[0mmsg\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;34m\"Could not decode {!r} for unicode trait '{}' of {} instance.\"\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2189\u001b[0m \u001b[1;32mraise\u001b[0m \u001b[0mTraitError\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmsg\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mvalue\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mname\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mclass_of\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mobj\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 2190\u001b[1;33m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0merror\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mobj\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 2191\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2192\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mfrom_string\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0ms\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;32m~\\anaconda3\\envs\\tomopyui-nocuda\\lib\\site-packages\\traitlets\\traitlets.py\u001b[0m in \u001b[0;36merror\u001b[1;34m(self, obj, value, error, info)\u001b[0m\n\u001b[0;32m 690\u001b[0m e = \"The '%s' trait expected %s, not %s.\" % 
(\n\u001b[0;32m 691\u001b[0m self.name, self.info(), describe(\"the\", value))\n\u001b[1;32m--> 692\u001b[1;33m \u001b[1;32mraise\u001b[0m \u001b[0mTraitError\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0me\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 693\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 694\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mget_metadata\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mkey\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdefault\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mNone\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;31mTraitError\u001b[0m: The 'description' trait of a Checkbox instance expected a unicode string, not the PureWindowsPath PureWindowsPath('C:/Users/Sam/Documents/tomopyui/tomopyui/data/20220113-1000-alignment/20220113-1001-sirt/conv.npy')." - ] - } - ], - "source": [ - "import pathlib\n", - "root = a.obj.run_list[0][\"20220113-0958-sirt\"][\"parent_fpath\"]\n", - "Allfileslist = []\n", - "alldirslist = []\n", - "for path, subdirs, files in os.walk(root):\n", - " Alldirslist = [path] + [subdirs]\n", - " Allfileslist = []\n", - " for name in files:\n", - " Allfileslist.append(pathlib.PurePath(path, name))\n", - " \n", - "import ipywidgets as widgets\n", - "\n", - "\n", - "#Search box + generate some checboxes\n", - "search_widget = widgets.Text(placeholder='Type for older experiments', description= 'Search:',value='')\n", - "experiments = {}\n", - "options_widget = widgets.VBox(layout={'overflow': 'auto'})\n", - "default_options = [widgets.Checkbox(description=eachfilename, value=False) for eachfilename in Allfileslist[-10:]]\n", - "\n", - "def whentextischanged (change):\n", - " \"\"\"Dynamically update the widget experiments\"\"\"\n", - " search_input = change['new']\n", - " Allfileslist = str([Allfileslist.name for file in Allfileslist])\n", - " if 
search_input == '':\n", - " # Reset search field, default to last 9 experiments\n", - " new_options = default_options\n", - " else:\n", - " # Filter by search\n", - " close_matches = [x for x in Allfileslist if search_input.lower() in x.lower()][:10]\n", - " for name in close_matches:\n", - " if name not in experiments:\n", - " experiments[name] = widgets.Checkbox(description=name, value=False)\n", - " new_options = [experiments[eachfilename] for eachfilename in close_matches]\n", - "\n", - " options_widget.children = new_options\n", - "\n", - "#Generate the vbox, search\n", - "multi_select = widgets.VBox([search_widget, options_widget])\n", - "search_widget.observe(whentextischanged, names='value')\n", - "multi_select" - ] - }, - { - "cell_type": "code", - "execution_count": 694, - "id": "2c23105b-de09-40e2-a1ec-95094ac05559", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'recon'" - ] - }, - "execution_count": 694, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "a.selected_data_analysis_type" - ] - }, - { - "cell_type": "code", - "execution_count": 719, - "id": "b55db22d-233b-4b8c-a140-0cb709139c57", - "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "03ef40eb70e943cca06c14d2a7b02bf9", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "VBox(children=(HBox(children=(VBox(children=(Label(value='Original Data', layout=Layout(justify_content='cente…" - ] - }, - "execution_count": 719, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from tomopyui.widgets.meta import Filebrowser\n", - "fb = Filebrowser()\n", - "fb.create_file_browser()\n", - "fb.filebrowser" - ] - }, - { - "cell_type": "code", - "execution_count": 721, - "id": "e7b296e8-de5d-407e-bba4-74c115d7e69b", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - 
"'C:\\\\Users\\\\Sam\\\\Documents\\\\tomopyui\\\\tomopyui\\\\data\\\\20220113-1658-recon\\\\20220113-1658-sirt'" - ] - }, - "execution_count": 721, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "str(fb.selected_method)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d4c4e86b-62e1-4eab-acdd-2969f1508325", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/tests/test_tomopyui.py b/tests/test_tomopyui.py deleted file mode 100644 index 363b3e2..0000000 --- a/tests/test_tomopyui.py +++ /dev/null @@ -1,2 +0,0 @@ -def test_something(): - pass diff --git a/tomopyui/_sharedvars.py b/tomopyui/_sharedvars.py index d8a95fb..01fcf17 100644 --- a/tomopyui/_sharedvars.py +++ b/tomopyui/_sharedvars.py @@ -91,4 +91,4 @@ "butterworth", } -extend_description_style = {"description_width": "auto", "font_family": "Helvetica"} \ No newline at end of file +extend_description_style = {"description_width": "auto", "font_family": "Helvetica"} diff --git a/tomopyui/_version.py b/tomopyui/_version.py index f102a9c..6526deb 100644 --- a/tomopyui/_version.py +++ b/tomopyui/_version.py @@ -1 +1 @@ -__version__ = "0.0.1" +__version__ = "0.0.7" diff --git a/tomopyui/backend/io.py b/tomopyui/backend/io.py new file mode 100644 index 0000000..46a64ee --- /dev/null +++ b/tomopyui/backend/io.py @@ -0,0 +1,4059 @@ +import numpy as np +import tifffile as tf +import tomopy.prep.normalize as tomopy_normalize +import os +import json +import dxchange +import re +import olefile +import pathlib +import tempfile 
+import dask.array as da +import dask +import shutil +import copy +import multiprocessing as mp +import pandas as pd +import time +import datetime +import h5py +import dask_image.imread + +from abc import ABC, abstractmethod +from tomopy.sim.project import angles as angle_maker +from tomopyui.backend.util.dxchange.reader import read_ole_metadata, read_xrm, read_txrm +from tomopyui.backend.util.dask_downsample import pyramid_reduce_gaussian +from skimage.transform import rescale +from joblib import Parallel, delayed +from ipywidgets import * +from functools import partial +from skimage.util import img_as_float32 + + +class IOBase: + """ + Base class for all data imported. Contains some setter/getter attributes that also + set other attributes, such setting number of pixels for a numpy array. + + Also has methods such as _check_downsampled_data, which checks for previously + uploaded downsampled data, and writes it in a subfolder if not already there. + + _file_finder is under this class, but logically it does not belong here. TODO. + """ + + # Save keys + normalized_projections_hdf_key = "normalized_projections.hdf5" + normalized_projections_tif_key = "normalized_projections.tif" + normalized_projections_npy_key = "normalized_projections.npy" + + # hdf keys + hdf_key_raw_proj = "/exchange/data" + hdf_key_raw_flats = "/exchange/data_white" + hdf_key_raw_darks = "/exchange/data_dark" + hdf_key_theta = "/exchange/theta" + hdf_key_norm_proj = "/process/normalized/data" + hdf_key_norm = "/process/normalized/" + hdf_key_ds = "/process/downsampled/" + hdf_key_ds_0 = "/process/downsampled/0/" + hdf_key_ds_1 = "/process/downsampled/1/" + hdf_key_ds_2 = "/process/downsampled/2/" + hdf_key_data = "data" # to be added after downsampled/0,1,2/... + hdf_key_bin_frequency = "frequency" # to be added after downsampled/0,1,2/... + hdf_key_bin_centers = "bin_centers" # to be added after downsampled/0,1,2/... 
+ hdf_key_image_range = "image_range" # to be added after downsampled/0,1,2/... + hdf_key_bin_edges = "bin_edges" + hdf_key_percentile = "percentile" + hdf_key_ds_factor = "ds_factor" + hdf_key_process = "/process" + + hdf_keys_ds_hist = [ + hdf_key_bin_frequency, + hdf_key_bin_centers, + hdf_key_image_range, + hdf_key_percentile, + ] + hdf_keys_ds_hist_scalar = [hdf_key_ds_factor] + + def __init__(self): + + self._data = np.random.rand(10, 100, 100) + self.data = self._data + self.data_ds = self.data + self.imported = False + self._filepath = pathlib.Path() + self.dtype = None + self.shape = None + self.pxX = self._data.shape[2] + self.pxY = self._data.shape[1] + self.pxZ = self._data.shape[0] + self.size_gb = None + self.filedir = None + self.filename = None + self.extension = None + self.parent = None + self.energy = None + self.raw = False + self.single_file = False + self.hist = None + self.allowed_extensions = [".npy", ".tiff", ".tif"] + self.metadata = Metadata_General_Prenorm() + self.hdf_file = None + + @property + def data(self): + return self._data + + @data.setter + def data(self, value): + (self.pxZ, self.pxY, self.pxX) = self._data.shape + self.rangeX = (0, self.pxX - 1) + self.rangeY = (0, self.pxY - 1) + self.rangeZ = (0, self.pxZ - 1) + self.size_gb = self._data.nbytes / 1048576 / 1000 + self.dtype = self._data.dtype + self._data = value + + @property + def filepath(self): + return self._filepath + + @filepath.setter + def filepath(self, value): + self.filedir = value.parent + self.filename = value.name + self.extension = value.suffix + self._filepath = value + + def _check_and_open_hdf(hdf_func): + def inner_func(self, *args, **kwargs): + self._filepath = self.filedir / self.filename + if self.hdf_file: + hdf_func(self, *args, **kwargs) + else: + self._open_hdf_file_read_only(self.filepath) + + hdf_func(self, *args, **kwargs) + + return inner_func + + def _check_and_open_hdf_read_write(hdf_func): + def inner_func(self, *args, **kwargs): + 
self._filepath = self.filedir / self.filename + if self.hdf_file: + hdf_func(self, *args, **kwargs) + else: + self._open_hdf_file_read_write(self.filepath) + hdf_func(self, *args, **kwargs) + + return inner_func + + def _open_hdf_file_read_only(self, filepath=None): + if filepath is None: + filepath = self.filepath + self._close_hdf_file() + self.hdf_file = h5py.File(filepath, "r") + + def _open_hdf_file_read_write(self, filepath=None): + if filepath is None: + filepath = self.filepath + self._close_hdf_file() + self.hdf_file = h5py.File(filepath, "r+") + + def _open_hdf_file_append(self, filepath=None): + if filepath is None: + filepath = self.filepath + self._close_hdf_file() + self.hdf_file = h5py.File(filepath, "a") + + @_check_and_open_hdf + def _load_hdf_normalized_data_into_memory(self): + # load normalized data into memory + self._data = self.hdf_file[self.hdf_key_norm_proj][:] + self.data = self._data + pyramid_level = self.hdf_key_ds + str(0) + "/" + try: + self.hist = { + key: self.hdf_file[self.hdf_key_norm + key][:] + for key in self.hdf_keys_ds_hist + } + except KeyError: + # load downsampled histograms if regular histograms don't work + self.hist = { + key: self.hdf_file[pyramid_level + key][:] + for key in self.hdf_keys_ds_hist + } + for key in self.hdf_keys_ds_hist_scalar: + self.hist[key] = self.hdf_file[pyramid_level + key][()] + + ds_data_key = pyramid_level + self.hdf_key_data + self.data_ds = self.hdf_file[ds_data_key] + + @_check_and_open_hdf + def _unload_hdf_normalized_and_ds(self): + self._data = self.hdf_file[self.hdf_key_norm_proj] + self.data = self._data + pyramid_level = self.hdf_key_ds + str(0) + "/" + ds_data_key = pyramid_level + self.hdf_key_data + self.data_ds = self.hdf_file[ds_data_key] + + @_check_and_open_hdf + def _load_hdf_ds_data_into_memory(self, pyramid_level=0): + pyramid_level = self.hdf_key_ds + str(pyramid_level) + "/" + ds_data_key = pyramid_level + self.hdf_key_data + self.data_ds = self.hdf_file[ds_data_key][:] + 
self.hist = { + key: self.hdf_file[pyramid_level + key][:] for key in self.hdf_keys_ds_hist + } + for key in self.hdf_keys_ds_hist_scalar: + self.hist[key] = self.hdf_file[pyramid_level + key][()] + self._data = self.hdf_file[self.hdf_key_norm_proj] + self.data = self._data + + @_check_and_open_hdf + def _return_ds_data(self, pyramid_level=0, px_range=None): + + pyramid_level = self.hdf_key_ds + str(pyramid_level) + "/" + ds_data_key = pyramid_level + self.hdf_key_data + if px_range is None: + self.data_returned = self.hdf_file[ds_data_key][:] + else: + x = px_range[0] + y = px_range[1] + self.data_returned = self.hdf_file[ds_data_key] + self.data_returned = copy.deepcopy( + self.data_returned[:, y[0] : y[1], x[0] : x[1]] + ) + + @_check_and_open_hdf + def _return_data(self, px_range=None): + if px_range is None: + self.data_returned = self.hdf_file[self.hdf_key_norm_proj][:] + else: + x = px_range[0] + y = px_range[1] + self.data_returned = self.hdf_file[self.hdf_key_norm_proj][ + :, y[0] : y[1], x[0] : x[1] + ] + + @_check_and_open_hdf + def _return_hist(self, pyramid_level=0): + pyramid_level = self.hdf_key_ds + str(pyramid_level) + "/" + ds_data_key = pyramid_level + self.hdf_key_data + self.hist_returned = { + key: self.hdf_file[pyramid_level + key][:] for key in self.hdf_keys_ds_hist + } + for key in self.hdf_keys_ds_hist_scalar: + self.hist_returned[key] = self.hdf_file[pyramid_level + key][()] + + @_check_and_open_hdf + def _delete_downsampled_data(self): + if self.hdf_key_ds in self.hdf_file: + del self.hdf_file[self.hdf_key_ds] + + def _close_hdf_file(self): + if self.hdf_file: + self.hdf_file.close() + + def _np_hist(self): + r = [np.min(self.data), np.max(self.data)] + bins = 200 if self.data.size > 200 else self.data.size + hist = np.histogram(self.data, range=r, bins=bins) + percentile = np.percentile(self.data.flatten(), q=(0.5, 99.5)) + bin_edges = hist[1] + return hist, r, bins, percentile + + def _dask_hist(self): + r = [da.min(self.data), 
da.max(self.data)] + bins = 200 if self.data.size > 200 else self.data.size + hist = da.histogram(self.data, range=r, bins=bins) + percentile = da.percentile(self.data.flatten(), q=(0.5, 99.5)) + bin_edges = hist[1] + return hist, r, bins, percentile + + def _dask_bin_centers(self, grp, write=False, savedir=None): + tmp_filepath = copy.copy(self.filepath) + tmp_filedir = copy.copy(self.filedir) + if savedir is None: + self.filedir = self.import_savedir + else: + self.filedir = savedir + self.filepath = self.filedir / self.normalized_projections_hdf_key + self._open_hdf_file_append() + bin_edges = da.from_array(self.hdf_file[grp + self.hdf_key_bin_edges]) + bin_centers = da.from_array( + [(bin_edges[i] + bin_edges[i + 1]) / 2 for i in range(len(bin_edges) - 1)] + ) + if write and savedir is not None: + data_dict = {grp + self.hdf_key_bin_centers: bin_centers} + self.dask_data_to_h5(data_dict, savedir=savedir) + self.filepath = tmp_filepath + self.filedir = tmp_filedir + return bin_centers + + def _dask_hist_and_save_data(self): + hist, r, bins, percentile = self._dask_hist() + grp = IOBase.hdf_key_norm + "/" + data_dict = { + self.hdf_key_norm_proj: self.data, + grp + self.hdf_key_bin_frequency: hist[0], + grp + self.hdf_key_bin_edges: hist[1], + grp + self.hdf_key_image_range: r, + grp + self.hdf_key_percentile: percentile, + } + self.dask_data_to_h5(data_dict, savedir=self.import_savedir) + self._dask_bin_centers(grp, write=True, savedir=self.import_savedir) + + def _np_hist_and_save_data(self): + hist, r, bins, percentile = self._np_hist() + grp = IOBase.hdf_key_norm + "/" + # self._data = da.from_array(self.data) + # self.data = self._data + data_dict = { + self.hdf_key_norm_proj: self.data, + grp + self.hdf_key_bin_frequency: hist[0], + grp + self.hdf_key_bin_edges: hist[1], + grp + self.hdf_key_image_range: r, + grp + self.hdf_key_percentile: percentile, + } + self.dask_data_to_h5(data_dict, savedir=self.import_savedir) + self._dask_bin_centers(grp, 
write=True, savedir=self.import_savedir) + + def _check_downsampled_data(self, label=None): + """ + Checks to see if there is downsampled data in a directory. If it doesn't it + will write new downsampled data. + + Parameters + ---------- + energy : str, optional + Energy in string format "\d\d\d\d\d.\d\d" + label : widgets.Label, optional + This is a label that will update in the frontend. + """ + filedir = self.filedir + files = [pathlib.Path(f).name for f in os.scandir(filedir) if not f.is_dir()] + if self.normalized_projections_hdf_key in files: + self._filepath = filedir / self.normalized_projections_hdf_key + self.filepath = self._filepath + self._open_hdf_file_read_write() + if self.hdf_key_ds in self.hdf_file: + self._load_hdf_ds_data_into_memory() + else: + pyramid_reduce_gaussian( + self.data, + io_obj=self, + ) + self._load_hdf_ds_data_into_memory() + + def _file_finder(self, filedir, filetypes: list): + """ + Used to find files of a given filetype in a directory. TODO: can go elsewhere. + + Parameters + ---------- + filedir : pathlike, relative or absolute + Folder in which to search for files. + filetypes : list of str + Filetypes list. e.g. [".txt", ".npy"] + """ + files = [pathlib.PurePath(f) for f in os.scandir(filedir) if not f.is_dir()] + files_with_ext = [ + file.name for file in files if any(x in file.name for x in filetypes) + ] + return files_with_ext + + def _file_finder_fullpath(self, filedir, filetypes: list): + """ + Used to find files of a given filetype in a directory. TODO: can go elsewhere. + + Parameters + ---------- + filedir : pathlike, relative or absolute + Folder in which to search for files. + filetypes : list of str + Filetypes list. e.g. 
[".txt", ".npy"] + """ + files = [pathlib.Path(f) for f in os.scandir(filedir) if not f.is_dir()] + fullpaths_of_files_with_ext = [ + file for file in files if any(x in file.name for x in filetypes) + ] + return fullpaths_of_files_with_ext + + +class ProjectionsBase(IOBase, ABC): + + """ + Base class for projections data. Abstract methods include importing/exporting data + and metadata. One can import a file directory, or a particular file within a + directory. Aliases give options for users to extract their data with other keywords. + + """ + + # https://stackoverflow.com/questions/4017572/ + # how-can-i-make-an-alias-to-a-non-function-member-attribute-in-a-python-class + aliases = { + "prj_imgs": "data", + "num_angles": "pxZ", + "width": "pxX", + "height": "pxY", + "px_range_x": "rangeX", + "px_range_y": "rangeY", + "px_range_z": "rangeZ", # Could probably fix these + } + + def __init__(self): + super().__init__() + self.px_size = 1 + self.angles_rad = None + self.angles_deg = None + self.saved_as_tiff = False + self.tiff_folder = False + + def __setattr__(self, name, value): + name = self.aliases.get(name, name) + object.__setattr__(self, name, value) + + def __getattr__(self, name): + if name == "aliases": + raise AttributeError # http://nedbatchelder.com/blog/201010/surprising_getattr_recursion.html + name = self.aliases.get(name, name) + return object.__getattribute__(self, name) + + def save_normalized_as_tiff(self): + """ + Saves current self.data under the current self.filedir as + self.normalized_projections_tif_key + """ + tf.imwrite(self.filedir / str(self.normalized_projections_tif_key), self.data) + + def dask_data_to_h5(self, data_dict, savedir=None): + """ + Brings lazy dask arrays to hdf5. + + Parameters + ---------- + data_dict: dict + Dictionary like {"/path/to/data": data} + savedir: pathlib.Path + Optional. 
Will default to self.filedir + """ + if savedir is None: + filedir = self.filedir + else: + filedir = savedir + for key in data_dict: + if not isinstance(data_dict[key], da.Array): + data_dict[key] = da.from_array(data_dict[key]) + da.to_hdf5( + filedir / self.normalized_projections_hdf_key, + data_dict, + ) + + def make_import_savedir(self, folder_name): + """ + Creates a save directory to put projections into. + """ + self.import_savedir = pathlib.Path(self.filedir / folder_name) + if self.import_savedir.exists(): + now = datetime.datetime.now() + dt_str = now.strftime("%Y%m%d-%H%M-") + save_name = dt_str + folder_name + self.import_savedir = pathlib.Path(self.filedir / save_name) + if self.import_savedir.exists(): + dt_str = now.strftime("%Y%m%d-%H%M%S-") + save_name = dt_str + folder_name + self.import_savedir = pathlib.Path(self.filedir / save_name) + self.import_savedir.mkdir() + + @abstractmethod + def import_metadata(self, filedir): + ... + + @abstractmethod + def import_filedir_projections(self, filedir): + ... + + @abstractmethod + def import_file_projections(self, filepath): + ... 
+ + +class Projections_Child(ProjectionsBase): + def __init__(self, parent_projections): + super().__init__() + self.parent_projections = parent_projections + + def copy_from_parent(self): + # self.parent_projections._unload_hdf_normalized_and_ds() + self._data = self.parent_projections.data + self.data = self._data + self.data_ds = self.parent_projections.data_ds + self.hist = self.parent_projections.hist + self.hdf_file = self.parent_projections.hdf_file + self.filedir = self.parent_projections.filedir + self.filepath = self.parent_projections.filepath + self.filename = self.parent_projections.filename + + def deepcopy_data_from_parent(self): + self.parent_projections._load_hdf_normalized_data_into_memory() + self._data = copy.deepcopy(self.parent_projections.data[:]) + self.data = self._data + + def get_parent_data_from_hdf(self, px_range=None): + """ + Gets data from hdf file and stores it in self.data. + Parameters + ---------- + px_range: tuple + tuple of two two-element lists - (px_range_x, px_range_y) + """ + self.data_ds = None + self.parent_projections._unload_hdf_normalized_and_ds() + self.parent_projections._return_data(px_range) + self._data = self.parent_projections.data_returned + self.data = self._data + self.parent_projections._close_hdf_file() + + def get_parent_data_ds_from_hdf(self, pyramid_level, px_range=None): + self.parent_projections._unload_hdf_normalized_and_ds() + self.parent_projections._return_ds_data(pyramid_level, px_range) + self.data_ds = self.parent_projections.data_returned + self.parent_projections._close_hdf_file() + + def get_parent_hists(self, pyramid_level): + self.parent_projections._unload_hdf_normalized_and_ds() + self.parent_projections._return_hist(pyramid_level) + self.hist = self.parent_projections.hist_returned + self.parent_projections._close_hdf_file() + + def import_file_projections(self): + pass + + def import_filedir_projections(self): + pass + + def import_metadata(self): + pass + + +class 
class Projections_Prenormalized(ProjectionsBase):
    """
    Prenormalized projections base class. This allows one to bring in data from a file
    directory or from a single tiff or npy, or multiple tiffs in a tiffstack. If the
    data has been normalized using tomopyui (at least for SSRL), importing from a
    folder will assume that "normalized_projections.npy" is in the folder, given that
    there is an import_metadata.json in that folder.

    Each method uses a parent Uploader instance for callbacks.
    """

    def import_filedir_projections(self, Uploader):
        """
        Import a directory of tiffs. Triggered when the tiff-folder checkbox is
        selected on the frontend; falls back to single-file import when metadata
        was already found and the tiff-folder option is off.
        """
        if Uploader.imported_metadata and not self.tiff_folder:
            self.import_file_projections(Uploader)
            return
        self.tic = time.perf_counter()
        Uploader.import_status_label.value = "Importing file directory."
        self.filedir = Uploader.filedir
        if not Uploader.imported_metadata:
            self.make_import_savedir(str(self.metadata.metadata["energy_str"] + "eV"))
            self.metadata.set_attributes_from_metadata_before_import(self)
        cwd = os.getcwd()
        os.chdir(self.filedir)
        try:
            self._data = dask_image.imread.imread("*.tif").astype(np.float32)
        except Exception:
            # No "*.tif" matches in this directory; retry the alternate
            # spelling. (Was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            self._data = dask_image.imread.imread("*.tiff").astype(np.float32)
        self.data = self._data
        self.metadata.set_metadata_from_attributes_after_import(self)
        self.filedir = self.import_savedir
        self.save_data_and_metadata(Uploader)
        self._check_downsampled_data()
        os.chdir(cwd)
        self.filepath = self.import_savedir / self.normalized_projections_hdf_key
        self.hdf_file = h5py.File(self.filepath)
        self._close_hdf_file()

    def import_file_projections(self, Uploader):
        """
        Will import a file selected on the frontend. Goes through several steps:

        1. Set file directories/filenames/filepaths.
        2. Sets save directory
        3. Set projections attributes from the metadata supplied by frontend, if any.
        4. Determine what kind of file it is
        5. Upload
        6. Sets metadata from data info after upload
        7. Saves in the import directory.
        """
        if self.tiff_folder:
            self.import_filedir_projections(Uploader)
            return
        self.tic = time.perf_counter()
        Uploader.import_status_label.value = "Importing single file."
        self.imported = False
        self.filedir = Uploader.filedir
        self.filename = Uploader.filename
        if self.filename is None or self.filename == "":
            self.filename = str(Uploader.images_in_dir[0].name)
        self.filepath = self.filedir / self.filename
        if not Uploader.imported_metadata:
            # Make the per-energy import save directory. (Removed a stray no-op
            # string statement "trying to make importsavedir" that sat here.)
            self.make_import_savedir(str(self.metadata.metadata["energy_str"] + "eV"))
            self.metadata.set_attributes_from_metadata_before_import(self)
            self.filedir = self.import_savedir

        # if import metadata is found in the directory, self.normalized_projections_npy_key
        # will be uploaded. This behavior is probably OK if we stick to this file
        # structure
        if Uploader.imported_metadata:
            files = [
                pathlib.Path(f).name for f in os.scandir(self.filedir) if not f.is_dir()
            ]
            if self.normalized_projections_hdf_key in files:
                Uploader.import_status_label.value = (
                    "Detected metadata and hdf5 file in this directory,"
                    + " uploading normalized_projections.hdf5"
                )
            elif any(file.endswith(".npy") for file in files):
                # BUGFIX: was `".npy" in files`, which tests list membership of
                # the literal string ".npy" and could never detect an npy file.
                Uploader.import_status_label.value = (
                    "Detected metadata and npy file in this directory,"
                    + " uploading normalized_projections.npy"
                )
                self._data = np.load(
                    self.filedir / "normalized_projections.npy"
                ).astype(np.float32)
                self.data = self._data
            if Uploader.save_tiff_on_import_checkbox.value:
                Uploader.import_status_label.value = "Saving projections as .tiff."
                self.saved_as_tiff = True
                self.save_normalized_as_tiff()
                self.metadata.metadata["saved_as_tiff"] = True
            self.metadata.set_attributes_from_metadata(self)
            self._check_downsampled_data(label=Uploader.import_status_label)
            self.imported = True

        elif any([x in self.filename for x in [".tif", ".tiff"]]):
            self._data = dask_image.imread.imread(self.filepath).astype(np.float32)
            # Zero out NaN/inf pixels before saving.
            self._data = da.where(da.isfinite(self._data), self._data, 0)
            self.data = self._data
            self.save_data_and_metadata(Uploader)
            self.imported = True
            self.filepath = self.import_savedir / self.normalized_projections_hdf_key

        elif ".npy" in self.filename:
            self._data = np.load(self.filepath).astype(np.float32)
            # Zero out NaN/inf pixels before saving.
            self._data = np.where(np.isfinite(self._data), self._data, 0)
            self.data = self._data
            self.save_data_and_metadata(Uploader)
            self.imported = True
            self.filepath = self.import_savedir / self.normalized_projections_hdf_key

    def make_angles(self):
        """
        Makes angles based on self.angle_start, self.angle_end, and self.pxZ.

        Also converts to degrees.
        """
        self.angles_rad = angle_maker(
            self.pxZ,
            ang1=self.angle_start,
            ang2=self.angle_end,
        )
        self.angles_deg = [x * 180 / np.pi for x in self.angles_rad]

    def import_metadata(self, filepath):
        """Load metadata from filepath, auto-detecting the metadata type."""
        self.metadata = Metadata.parse_metadata_type(filepath)
        self.metadata.load_metadata()

    def save_data_and_metadata(self, Uploader):
        """
        Saves current data and metadata in import_savedir.
        """
        self.filedir = self.import_savedir
        self._dask_hist_and_save_data()
        self.saved_as_tiff = False
        if Uploader.save_tiff_on_import_checkbox.value:
            Uploader.import_status_label.value = "Saving projections as .tiff."
            self.saved_as_tiff = True
            self.save_normalized_as_tiff()
            self.metadata.metadata["saved_as_tiff"] = True
        self.metadata.filedir = self.filedir
        self.toc = time.perf_counter()
        self.metadata.metadata["import_time"] = self.toc - self.tic
        self.metadata.set_metadata_from_attributes_after_import(self)
        self.metadata.save_metadata()
        Uploader.import_status_label.value = "Checking for downsampled data."
        self._check_downsampled_data(label=Uploader.import_status_label)
class RawProjectionsBase(ProjectionsBase, ABC):
    """
    Base class for raw projections. Contains methods for normalization, and abstract
    methods that are required in any subclass of this class for data import. Some
    methods currently are not used by tomopyui (e.g. import_filedir_flats), but could
    be used in the future. If you don't want to use some of these methods, just
    write "def method(self):pass" to enable subclass instantiation.
    """

    def __init__(self):
        super().__init__()
        self.flats = None  # flat-field (reference) images
        self.flats_ind = None  # indexes of flats within the acquisition sequence
        self.darks = None  # dark-field images
        self.normalized = False

    def normalize_nf(self):
        """
        Wrapper for tomopy's normalize_nf: normalizes each projection by its
        nearest flat (using self.flats_ind), then takes -log.
        """
        self._data = tomopy_normalize.normalize_nf(
            self._data, self.flats, self.darks, self.flats_ind
        )
        self._data = tomopy_normalize.minus_log(self._data)
        self.data = self._data
        self.raw = False
        self.normalized = True

    # for Tomopy-based normalization
    def normalize(self):
        """
        Wrapper for tomopy's normalize (flat/dark correction followed by -log).
        """
        self._data = tomopy_normalize.normalize(self._data, self.flats, self.darks)
        self._data = tomopy_normalize.minus_log(self._data)
        self.data = self._data
        self.raw = False
        self.normalized = True

    # For dask-based normalization (should be faster)
    @staticmethod
    def average_chunks(chunked_da):
        """
        Method required for averaging within the normalize_and_average method. Takes
        all chunks of a dask array and averages each chunk along axis=0.

        FIX: declared as @staticmethod. It was previously a plain function in the
        class body (no ``self``), which only worked when accessed through the
        class (``RawProjectionsBase.average_chunks``) and would break if called
        on an instance.

        Parameters
        ----------
        chunked_da: dask array
            Dask array with chunks to average over axis=0.
            For ex. if you have an initial numpy array with shape (50, 100, 100),
            and you chunk this along the 0th dimension into 5x (10, 100, 100),
            the output of this function will be of dimension (5, 100, 100).

        Returns
        -------
        arr: dask array
            Lazy dask array averaged along axis=0 with respect to how many
            chunks it initially had.
        """

        @dask.delayed
        def mean_on_chunks(a):
            return np.mean(a, axis=0)[np.newaxis, ...]

        blocks = chunked_da.to_delayed().ravel()
        results = [
            da.from_delayed(
                mean_on_chunks(b),
                shape=(1, chunked_da.shape[1], chunked_da.shape[2]),
                dtype=np.float32,
            )
            for b in blocks
        ]
        # arr not computed yet
        arr = da.concatenate(results, axis=0, allow_unknown_chunksizes=True)
        return arr

    @staticmethod
    def normalize_and_average(
        projs,
        flats,
        dark,
        flat_loc,
        num_exposures_per_proj,
        status_label=None,
        compute=True,
    ):
        """
        Function takes pre-chunked dask arrays of projections, flats, darks, along
        with flat locations within the projection images.

        The chunk size is the number of exposures per reference or per angle.

        darks do not need to be location-specific; only the median across the
        whole dark dataset is used.

        Normalization procedure:
        1. Average flats chunk-wise (flat1average, flat2average, ...).
        2. Subtract the median dark from the averaged flats.
        3. Subtract the median dark from individual projections (not averaged).
        4. Chunk the projections so each chunk is closest to one flat.
        5. Normalize each projection by its nearest averaged flat.
        6. Average the projections in their initial chunks and take -log.

        FIX: declared as @staticmethod (it has no ``self`` and is always called
        as ``RawProjectionsBase.normalize_and_average``), matching
        :meth:`normalize_no_locations_no_average`.

        Parameters
        ----------
        projs, flats: dask array
            Pre-chunked projection/reference stacks.
        dark: array-like
            Dark-field stack; reduced with np.median along axis 0.
        flat_loc: list(int)
            Indexes where reference collections occur in the sequence.
        num_exposures_per_proj: int
            Number of repeated exposures per angle (final averaging chunk size).
        status_label: widget with a ``value`` attribute, optional
        compute: bool
            When True, trigger the dask computation before returning.

        Returns
        -------
        arr: dask or numpy array
            Normalized, exposure-averaged, -log projections.
        """
        if status_label is not None:
            status_label.value = "Averaging flatfields."

        # Averaging flats
        flats_reduced = RawProjectionsBase.average_chunks(flats)
        dark = np.median(dark, axis=0)
        denominator = flats_reduced - dark
        denominator = denominator.compute()
        # Projection locations defined as the centerpoint between two reference
        # collections
        # Chunk the projections such that they will be divided by the nearest flat
        # The first chunk of data will be divided by the first flat.
        # The first chunk of data is likely smaller than the others.

        # Nested so it can close over `denominator`; hoisting it out would
        # require threading denominator through dask.delayed explicitly.
        @dask.delayed
        def divide_arrays(x, ind):
            y = denominator[ind]
            return np.true_divide(x, y)

        if len(flat_loc) != 1:
            proj_locations = [
                int(np.ceil((flat_loc[i] + flat_loc[i + 1]) / 2))
                for i in range(len(flat_loc) - 1)
            ]
            chunk_setup = [int(np.ceil(proj_locations[0]))]
            for i in range(len(proj_locations) - 1):
                chunk_setup.append(proj_locations[i + 1] - proj_locations[i])
            chunk_setup.append(projs.shape[0] - sum(chunk_setup))
            chunk_setup = tuple(chunk_setup)
            projs_rechunked = projs.rechunk(
                {0: chunk_setup, 1: -1, 2: -1}
            )  # chunk data
            projs_rechunked = projs_rechunked - dark
            if status_label is not None:
                status_label.value = "Dividing by flatfields and taking -log."
            blocks = projs_rechunked.to_delayed().ravel()
            results = [
                da.from_delayed(
                    divide_arrays(b, i),
                    shape=(
                        chunksize,
                        projs_rechunked.shape[1],
                        projs_rechunked.shape[2],
                    ),
                    dtype=np.float32,
                )
                for i, (b, chunksize) in enumerate(zip(blocks, chunk_setup))
            ]
            arr = da.concatenate(results, axis=0, allow_unknown_chunksizes=True)
        else:
            # if only 1 set of flats was taken, just divide normally.
            # NOTE(review): darks are not subtracted from projs in this branch,
            # unlike the multi-flat branch above — confirm this is intentional.
            arr = projs / flats_reduced

        arr = arr.rechunk((num_exposures_per_proj, -1, -1))
        arr = RawProjectionsBase.average_chunks(arr).astype(np.float32)
        arr = -da.log(arr)
        if compute:
            arr = arr.compute()

        return arr

    @staticmethod
    def normalize_no_locations_no_average(
        projs,
        flats,
        dark,
        status_label=None,
        compute=True,
    ):
        """
        Normalize using dask arrays. Only averages references and normalizes.
        """
        if status_label is not None:
            status_label.value = "Averaging flatfields."
        flat_mean = np.mean(flats, axis=0)
        dark = np.median(dark, axis=0)
        denominator = flat_mean - dark
        if status_label is not None:
            status_label.value = "Dividing by flatfields and taking -log."
        projs = projs.rechunk({0: "auto", 1: -1, 2: -1})
        projs = projs / denominator
        projs = -da.log(projs)
        if compute:
            projs = projs.compute()
        return projs

    @abstractmethod
    def import_metadata(self, filedir):
        """
        After creating Metadata classes and subclasses, this logically belongs to those
        classes. TODO.
        """
        ...

    @abstractmethod
    def import_filedir_all(self, filedir):
        """
        Imports a directory of files containing raw projections, flats, and darks rather
        than a single file.
        """
        ...

    @abstractmethod
    def import_filedir_projections(self, filedir):
        """
        Imports a directory of just the raw projections.
        """
        ...

    @abstractmethod
    def import_filedir_flats(self, filedir):
        """
        Imports a directory of just the raw flats.
        """
        ...

    @abstractmethod
    def import_filedir_darks(self, filedir):
        """
        Imports a directory of just the raw darks.
        """
        ...

    @abstractmethod
    def import_file_all(self, filepath):
        """
        Imports a file containing all raw projections, flats, and darks.
        """
        ...

    @abstractmethod
    def import_file_projections(self, filepath):
        """
        Imports a file containing all raw projections.
        """
        ...

    @abstractmethod
    def import_file_flats(self, filepath):
        """
        Imports a file containing all raw flats.
        """
        ...

    @abstractmethod
    def import_file_darks(self, filepath):
        """
        Imports a file containing all raw darks.
        """
        ...
class RawProjectionsXRM_SSRL62C(RawProjectionsBase):
    """
    Raw data import functions associated with SSRL 6-2c. If you specify a folder filled
    with raw XRMS, a ScanInfo file, and a run script, this will automatically import
    your data and save it in a subfolder corresponding to the energy.
    """

    def __init__(self):
        super().__init__()
        self.allowed_extensions = self.allowed_extensions + [".xrm"]
        self.angles_from_filenames = True
        self.metadata = Metadata_SSRL62C_Raw()

    def import_metadata(self, Uploader):
        """
        Parse the ScanInfo file and run script in Uploader.filedir, read the
        first projection/flat XRM headers, and populate + save raw metadata.
        """
        self.metadata = Metadata_SSRL62C_Raw()
        self.data_hierarchy_level = 0
        filetypes = [".txt"]
        textfiles = self._file_finder(Uploader.filedir, filetypes)
        self.scan_info_path = [
            Uploader.filedir / file for file in textfiles if "ScanInfo" in file
        ][0]
        self.parse_scan_info()
        self.determine_scan_type()
        self.run_script_path = [
            Uploader.filedir / file for file in textfiles if "ScanInfo" not in file
        ]
        if len(self.run_script_path) == 1:
            self.run_script_path = self.run_script_path[0]
        elif len(self.run_script_path) > 1:
            # Several candidate .txt files: the run script starts with ";;".
            for file in self.run_script_path:
                with open(file, "r") as f:
                    line = f.readline()
                if line.startswith(";;"):
                    self.run_script_path = file
        self.angles_from_filenames = True
        if self.scan_info["REFEVERYEXPOSURES"] == 1 and self.scan_type == "ENERGY_TOMO":
            (
                self.flats_filenames,
                self.data_filenames,
            ) = self.get_all_data_filenames_filedir(Uploader.filedir)
            self.angles_from_filenames = False
            self.from_txrm = True
            self.from_xrm = False
        else:
            (
                self.flats_filenames,
                self.data_filenames,
            ) = self.get_all_data_filenames()
            # BUGFIX: was `self.txrm = False`; import_filedir_all tests
            # `self.from_txrm`, which was never set on this branch.
            self.from_txrm = False
            self.from_xrm = True
        # assume that the first projection is the same as the rest for metadata
        self.scan_info["PROJECTION_METADATA"] = self.read_xrms_metadata(
            [self.data_filenames[0]]
        )
        self.scan_info["FLAT_METADATA"] = self.read_xrms_metadata(
            [self.flats_filenames[0]]
        )
        if self.angles_from_filenames:
            self.get_angles_from_filenames()
        else:
            self.get_angles_from_txrm()
        self.pxZ = len(self.angles_rad)
        self.pxY = self.scan_info["PROJECTION_METADATA"][0]["image_height"]
        self.pxX = self.scan_info["PROJECTION_METADATA"][0]["image_width"]
        self.binning = self.scan_info["PROJECTION_METADATA"][0]["camera_binning"]
        self.raw_data_type = self.scan_info["PROJECTION_METADATA"][0]["data_type"]
        # XRM data_type codes: 5 -> uint16, 10 -> float32.
        if self.raw_data_type == 5:
            self.raw_data_type = np.dtype(np.uint16)
        elif self.raw_data_type == 10:
            self.raw_data_type = np.dtype(np.float32)
        self.pixel_size_from_metadata = (
            self.scan_info["PROJECTION_METADATA"][0]["pixel_size"] * 1000
        )  # nm
        self.get_and_set_energies(Uploader)
        self.filedir = Uploader.filedir
        self.metadata.filedir = Uploader.filedir
        self.metadata.filename = "raw_metadata.json"
        self.metadata.filepath = self.filedir / "raw_metadata.json"
        self.metadata.set_metadata(self)
        self.metadata.save_metadata()

    def import_filedir_all(self, Uploader):
        """Import metadata, then all selected energies (xrm or txrm route)."""
        self.import_metadata(Uploader)
        self.user_overwrite_energy = Uploader.user_overwrite_energy
        self.filedir = Uploader.filedir
        self.selected_energies = Uploader.energy_select_multiple.value
        if len(self.selected_energies) == 0:
            self.selected_energies = (Uploader.energy_select_multiple.options[0],)
            Uploader.energy_select_multiple.value = (
                Uploader.energy_select_multiple.options[0],
            )
        if self.from_xrm:
            self.import_from_run_script(Uploader)
        elif self.from_txrm:
            self.import_from_txrm(Uploader)
        self.imported = True

    def import_filedir_projections(self, filedir):
        """Unused for this beamline."""
        pass

    def import_filedir_flats(self, filedir):
        """Unused for this beamline."""
        pass

    def import_filedir_darks(self, filedir):
        """Unused for this beamline."""
        pass

    def import_file_all(self, filepath):
        """Unused for this beamline."""
        pass

    def import_file_projections(self, filepath):
        """Unused for this beamline."""
        pass

    def import_file_flats(self, filepath):
        """Unused for this beamline."""
        pass

    def import_file_darks(self, filepath):
        """Unused for this beamline."""
        pass

    def parse_scan_info(self):
        """
        Parse the ScanInfo text file into self.scan_info: key/value pairs up to
        the FILES marker, then the list of data files under "FILES".
        """
        data_file_list = []
        self.scan_info = []
        with open(self.scan_info_path, "r") as f:
            filecond = True
            for line in f.readlines():
                if "FILES" not in line and filecond:
                    self.scan_info.append(line.strip())
                    filecond = True
                else:
                    filecond = False
                    _ = self.scan_info_path.parent / line.strip()
                    data_file_list.append(_)
        metadata_tp = map(self.string_num_totuple, self.scan_info)
        self.scan_info = {scanvar[0]: scanvar[1] for scanvar in metadata_tp}
        # The key "REFEVERYEXPOSURES" captures a stray leading digit; drop it.
        self.scan_info["REFEVERYEXPOSURES"] = self.scan_info["REFEVERYEXPOSURES"][1:]
        self.scan_info = {key: int(self.scan_info[key]) for key in self.scan_info}
        self.scan_info["FILES"] = data_file_list[1:]

    def determine_scan_type(self):
        """Build self.scan_type, e.g. "ENERGY_TOMO", from nonzero scan vars ordered by value."""
        self.scan_order = [
            (k, self.scan_info[k])
            for k in ("TOMO", "ENERGY", "MOSAIC", "MULTIEXPOSURE")
            if self.scan_info[k] != 0
        ]
        self.scan_order = sorted(self.scan_order, key=lambda x: x[1])
        self.scan_type = [string for string, val in self.scan_order]
        self.scan_type = "_".join(self.scan_type)

    def get_and_set_energies(self, Uploader):
        """
        Collect energies from "sete " lines of the run script (estimating from
        pixel size when none are found) and populate the frontend selector.
        """
        self.energy_guessed = False
        energies = []
        with open(self.run_script_path, "r") as f:
            for line in f.readlines():
                if line.startswith("sete "):
                    energies.append(float(line[5:]))
        self.energies_list_float = sorted(list(set(energies)))
        if self.energies_list_float == []:
            self.energies_list_float = [
                self.est_en_from_px_size(self.pixel_size_from_metadata, self.binning)
            ]
            self.energy_guessed = True
        self.energies_list_str = [
            f"{energy:08.2f}" for energy in self.energies_list_float
        ]
        self.raw_pixel_sizes = [
            self.calculate_px_size(energy, self.binning)
            for energy in self.energies_list_float
        ]
        Uploader.energy_select_multiple.options = self.energies_list_str
        # Show at most 10 rows.
        Uploader.energy_select_multiple.rows = min(len(self.energies_list_str), 10)
        if len(self.energies_list_str) == 1 and self.energy_guessed:
            Uploader.energy_select_multiple.disabled = True
            Uploader.energy_select_multiple.description = "Est. Energy (eV):"
            Uploader.energy_overwrite_textbox.disabled = False
        else:
            Uploader.energy_select_multiple.description = "Energies (eV):"
            Uploader.energy_select_multiple.disabled = False
            Uploader.energy_overwrite_textbox.disabled = True

    def calculate_px_size(self, energy, binning):
        """
        Calculates the pixel size based on the energy and binning.
        From Johanna's calibration.
        """
        pixel_size = 0.002039449 * energy - 0.792164997
        pixel_size = pixel_size * binning
        return pixel_size

    def est_en_from_px_size(self, pixel_size, binning):
        """
        Estimates the energy based on the pixel size. This is for plain TOMO data where
        the energy is not available. You should be able to overwrite
        this in the frontend if energy cannot be found.
        Inverse of calculate_px_size.
        """
        # From Johanna's calibration doc
        energy = (pixel_size / binning + 0.792164997) / 0.002039449
        return energy

    def get_all_data_filenames(self):
        """
        Grabs the flats and projections filenames from scan info.

        Returns
        -------
        flats: list of pathlib.Path
            All flat file names in self.scan_info["FILES"]
        projs: list of pathlib.Path
            All projection file names in self.scan_info["FILES"]
        """
        flats = [
            file.parent / file.name
            for file in self.scan_info["FILES"]
            if "ref_" in file.name
        ]
        projs = [
            file.parent / file.name
            for file in self.scan_info["FILES"]
            if "ref_" not in file.name
        ]
        return flats, projs

    def get_all_data_filenames_filedir(self, filedir):
        """
        Grabs the flats and projections filenames from filedir, preferring
        .txrm containers over individual .xrm files when present.

        Returns
        -------
        flats: list of pathlib.Path
        projs: list of pathlib.Path
        """
        txrm_files = self._file_finder(filedir, [".txrm"])
        xrm_files = self._file_finder(filedir, [".xrm"])
        txrm_files = [filedir / file for file in txrm_files]
        xrm_files = [filedir / file for file in xrm_files]
        if any(["ref_" in str(file) for file in txrm_files]):
            flats = [
                file.parent / file.name for file in txrm_files if "ref_" in file.name
            ]
        else:
            flats = [
                file.parent / file.name for file in xrm_files if "ref_" in file.name
            ]
        if any(["tomo_" in str(file) for file in txrm_files]):
            projs = [
                file.parent / file.name for file in txrm_files if "tomo_" in file.name
            ]
        else:
            projs = [
                file.parent / file.name for file in xrm_files if "tomo_" in file.name
            ]
        return flats, projs

    def get_angles_from_filenames(self):
        """
        Grabs the angles from the file names of self.data_filenames.
        """
        # Raw string fixed: "\d" is an invalid escape in a plain string literal.
        reg_exp = re.compile(r"_[+-0]\d\d\d.\d\d")
        self.angles_deg = map(
            reg_exp.findall, [str(file) for file in self.data_filenames]
        )
        self.angles_deg = [float(angle[0][1:]) for angle in self.angles_deg]
        # De-duplicate while preserving first-seen order.
        self.angles_deg = list(dict.fromkeys(self.angles_deg))
        self.angles_rad = [x * np.pi / 180 for x in self.angles_deg]

    def get_angles_from_metadata(self):
        """
        Gets the angles from the raw image metadata.
        """
        self.angles_rad = [
            filemetadata["thetas"][0]
            for filemetadata in self.scan_info["PROJECTION_METADATA"]
        ]
        # De-duplicate while preserving first-seen order.
        self.angles_rad = list(dict.fromkeys(self.angles_rad))
        self.angles_deg = [x * 180 / np.pi for x in self.angles_rad]

    def get_angles_from_txrm(self):
        """
        Gets the angles from the txrm metadata.
        """
        self.angles_rad = self.scan_info["PROJECTION_METADATA"][0]["thetas"]
        self.angles_deg = [x * 180 / np.pi for x in self.angles_rad]

    def read_xrms_metadata(self, xrm_list):
        """
        Reads XRM files and snags the metadata from them.

        Parameters
        ----------
        xrm_list: list(pathlib.Path)
            list of XRMs to grab metadata from
        Returns
        -------
        metadatas: list(dict)
            List of metadata dicts for files in xrm_list
        """
        metadatas = []
        # NOTE(review): the OleFileIO handles are never closed here — confirm
        # read_ole_metadata reads eagerly, then close them.
        for filename in xrm_list:
            ole = olefile.OleFileIO(str(filename))
            metadata = read_ole_metadata(ole)
            metadatas.append(metadata)
        return metadatas

    def load_xrms(self, xrm_list, Uploader):
        """
        Loads XRM data from a file list in order, concatenates them to produce a stack
        of data (npy).

        Parameters
        ----------
        xrm_list: list(pathlib.Path)
            list of XRMs to upload
        Uploader: `Uploader`
            Should have an upload_progress attribute. This is the progress bar.
        Returns
        -------
        data_stack: np.ndarray
            Data grabbed from xrms in xrm_list
        metadatas: list(dict)
            List of metadata dicts for files in xrm_list
        """
        data_stack = None
        metadatas = []
        for i, filename in enumerate(xrm_list):
            data, metadata = read_xrm(str(filename))
            if data_stack is None:
                # Allocate once, based on the first image's shape/dtype.
                data_stack = np.zeros((len(xrm_list),) + data.shape, data.dtype)
            data_stack[i] = data
            metadatas.append(metadata)
            Uploader.upload_progress.value += 1
        data_stack = np.flip(data_stack, axis=1)
        return data_stack, metadatas

    def load_txrm(self, txrm_filepath):
        """Read a txrm container; returns (float32 data, metadata dict)."""
        data, metadata = read_txrm(str(txrm_filepath))
        data = img_as_float32(data)
        return data, metadata

    def import_from_txrm(self, Uploader):
        """
        Script to upload selected data from selected txrm energies.

        If an energy is selected on the frontend, it will be added to the queue to
        upload and normalize.

        Parameters
        ----------
        Uploader: `Uploader`
            Should have an upload_progress, status_label, and progress_output attribute.
            This is for the progress bar and information during the upload progression.
        """
        parent_metadata = self.metadata.metadata.copy()
        if "data_hierarchy_level" not in parent_metadata:
            try:
                # BUGFIX: json.load requires a file object; previously a Path
                # was passed to json.load while a different file
                # (self.filepath) was opened, so this reload always failed.
                with open(self.run_script_path.parent / "raw_metadata.json") as f:
                    parent_metadata = json.load(f)
            except Exception:
                pass
        for energy in self.selected_energies:
            _tmp_filedir = copy.deepcopy(self.filedir)
            self.metadata = Metadata_SSRL62C_Prenorm()
            self.metadata.set_parent_metadata(parent_metadata)
            Uploader.upload_progress.value = 0
            self.energy_str = energy
            self.energy_float = float(energy)
            self.px_size = self.calculate_px_size(float(energy), self.binning)
            Uploader.progress_output.clear_output()
            self.energy_label = Label(
                f"{energy} eV", layout=Layout(justify_content="center")
            )
            with Uploader.progress_output:
                display(self.energy_label)
            # Getting filename from specific energy
            self.flats_filename = [
                file.parent / file.name
                for file in self.flats_filenames
                if energy in file.name and "ref_" in file.name
            ]
            self.data_filename = [
                file.parent / file.name
                for file in self.data_filenames
                if energy in file.name and "tomo_" in file.name
            ]
            self.status_label = Label(
                "Uploading txrm.", layout=Layout(justify_content="center")
            )
            self.flats, self.scan_info["FLAT_METADATA"] = self.load_txrm(
                self.flats_filename[0]
            )
            self._data, self.scan_info["PROJECTION_METADATA"] = self.load_txrm(
                self.data_filename[0]
            )
            # No darks are collected; use a single zero frame.
            self.darks = np.zeros_like(self.flats[0])[np.newaxis, ...]
            self.make_import_savedir(str(energy + "eV"))
            self.status_label.value = "Normalizing."
            self.normalize()
            self._data = np.flip(self._data, axis=1)
            self.data = self._data
            self.status_label.value = "Calculating histogram of raw data and saving."
            self._np_hist_and_save_data()
            self.saved_as_tiff = False
            self.filedir = self.import_savedir
            if Uploader.save_tiff_on_import_checkbox.value:
                self.status_label.value = "Saving projections as .tiff."
                self.saved_as_tiff = True
                self.save_normalized_as_tiff()
            self.status_label.value = "Downsampling data."
            self._check_downsampled_data()
            self.status_label.value = "Saving metadata."
            self.data_hierarchy_level = 1
            self.metadata.set_metadata(self)
            self.metadata.filedir = self.import_savedir
            self.metadata.filename = "import_metadata.json"
            self.metadata.save_metadata()
            self.filedir = _tmp_filedir
            self._close_hdf_file()

    def import_from_run_script(self, Uploader):
        """
        Script to upload selected data from a run script.

        If an energy is selected on the frontend, it will be added to the queue to
        upload and normalize.

        This reads the run script in the folder. Each time "sete " is in the run
        script, the energy is changing and signifies a new tomography.

        Parameters
        ----------
        Uploader: `Uploader`
            Should have an upload_progress, status_label, and progress_output attribute.
            This is for the progress bar and information during the upload progression.
        """
        all_collections = [[]]
        energies = [[self.selected_energies[0]]]
        parent_metadata = self.metadata.metadata.copy()
        if "data_hierarchy_level" not in parent_metadata:
            try:
                # BUGFIX: json.load requires a file object; previously a Path
                # was passed to json.load while a different file
                # (self.filepath) was opened, so this reload always failed.
                with open(self.run_script_path.parent / "raw_metadata.json") as f:
                    parent_metadata = json.load(f)
            except Exception:
                pass
        with open(self.run_script_path, "r") as f:
            for line in f.readlines():
                if line.startswith("sete "):
                    energies.append(f"{float(line[5:]):08.2f}")
                    all_collections.append([])
                elif line.startswith("collect "):
                    filename = line[8:].strip()
                    all_collections[-1].append(self.run_script_path.parent / filename)
        if len(energies) > 1:
            # Drop the placeholder first entry.
            energies.pop(0)
            all_collections.pop(0)
        else:
            energies = energies[0]

        for energy, collect in zip(energies, all_collections):
            if energy not in self.selected_energies:
                continue
            _tmp_filedir = copy.deepcopy(self.filedir)
            self.metadata = Metadata_SSRL62C_Prenorm()
            self.metadata.set_parent_metadata(parent_metadata)
            Uploader.upload_progress.value = 0
            self.energy_str = energy
            self.energy_float = float(energy)
            self.px_size = self.calculate_px_size(float(energy), self.binning)
            Uploader.progress_output.clear_output()
            self.energy_label = Label(
                f"{energy} eV", layout=Layout(justify_content="center")
            )
            with Uploader.progress_output:
                display(Uploader.upload_progress)
                display(self.energy_label)
            # Getting filename from specific energy
            self.flats_filenames = [
                file.parent / file.name for file in collect if "ref_" in file.name
            ]
            self.data_filenames = [
                file.parent / file.name
                for file in collect
                if "ref_" not in file.name
            ]
            self.proj_ind = ["ref_" not in file.name for file in collect]
            self.status_label = Label(
                "Uploading .xrms.", layout=Layout(justify_content="center")
            )
            with Uploader.progress_output:
                display(self.status_label)
            # Uploading Data
            Uploader.upload_progress.max = len(self.flats_filenames) + len(
                self.data_filenames
            )
            self.flats, self.scan_info["FLAT_METADATA"] = self.load_xrms(
                self.flats_filenames, Uploader
            )
            self._data, self.scan_info["PROJECTION_METADATA"] = self.load_xrms(
                self.data_filenames, Uploader
            )
            # No darks are collected; use a single zero frame.
            self.darks = np.zeros_like(self.flats[0])[np.newaxis, ...]
            self.make_import_savedir(str(energy + "eV"))
            projs, flats, darks = self.setup_normalize()
            self.status_label.value = "Calculating flat positions."
            self.flats_ind_from_collect(collect)
            self.status_label.value = "Normalizing."
            self._data = RawProjectionsBase.normalize_and_average(
                projs,
                flats,
                darks,
                self.flats_ind,
                self.scan_info["NEXPOSURES"],
                status_label=self.status_label,
                compute=False,
            )
            self.data = self._data
            self.status_label.value = "Calculating histogram of raw data and saving."
            self._dask_hist_and_save_data()
            self.saved_as_tiff = False
            self.filedir = self.import_savedir
            if Uploader.save_tiff_on_import_checkbox.value:
                self.status_label.value = "Saving projections as .tiff."
                self.saved_as_tiff = True
                self.save_normalized_as_tiff()
            self.status_label.value = "Downsampling data."
            self._check_downsampled_data()
            self.status_label.value = "Saving metadata."
            self.data_hierarchy_level = 1
            self.metadata.set_metadata(self)
            self.metadata.filedir = self.import_savedir
            self.metadata.filename = "import_metadata.json"
            self.metadata.save_metadata()
            self.filedir = _tmp_filedir
            self._close_hdf_file()

    def setup_normalize(self):
        """
        Function to lazy load flats and projections as npy, convert to chunked dask
        arrays for normalization.

        Returns
        -------
        projs: dask array
            Projections chunked by scan_info["NEXPOSURES"]
        flats: dask array
            References chunked by scan_info["REFNEXPOSURES"]
        darks: dask array
            Zeros array with the same image dimensions as flats
        """
        data_dict = {
            self.hdf_key_raw_flats: self.flats,
            self.hdf_key_raw_proj: self._data,
        }
        self.dask_data_to_h5(data_dict, savedir=self.import_savedir)
        self.filepath = self.import_savedir / self.normalized_projections_hdf_key
        self._open_hdf_file_read_write()
        z_chunks_proj = self.scan_info["NEXPOSURES"]
        z_chunks_flats = self.scan_info["REFNEXPOSURES"]
        # Release the in-memory copies; re-read lazily from the hdf file.
        self.flats = None
        self._data = None

        self.flats = da.from_array(
            self.hdf_file[self.hdf_key_raw_flats],
            chunks=(z_chunks_flats, -1, -1),
        ).astype(np.float32)

        self._data = da.from_array(
            self.hdf_file[self.hdf_key_raw_proj],
            chunks=(z_chunks_proj, -1, -1),
        ).astype(np.float32)
        darks = da.from_array(self.darks, chunks=(-1, -1, -1)).astype(np.float32)
        projs = self._data
        flats = self.flats

        return projs, flats, darks

    def flats_ind_from_collect(self, collect):
        """
        Calculates where the flats indexes are based on the current "collect", which
        is a collection under each "sete" from the run script importer.

        This will set self.flats_ind for normalization.
        """
        copy_collect = collect.copy()
        # Keep only the first "ref_" file of each consecutive reference run;
        # mark the rest with the sentinel 1 and drop them below.
        i = 0
        for pos, file in enumerate(copy_collect):
            if "ref_" in file.name:
                if i == 0:
                    i = 1
                elif i == 1:
                    copy_collect[pos] = 1
            elif "ref_" not in file.name:
                i = 0
        copy_collect = [value for value in copy_collect if value != 1]
        ref_ind = [True if "ref_" in file.name else False for file in copy_collect]
        ref_ind = [i for i in range(len(ref_ind)) if ref_ind[i]]
        ref_ind = sorted(list(set(ref_ind)))
        # Shift by the number of earlier references so indexes are relative to
        # the projection-only sequence.
        ref_ind = [ind - i for i, ind in enumerate(ref_ind)]
        # These indexes are at the position of self.data_filenames that
        # STARTS the next round after the references are taken
        self.flats_ind = ref_ind

    def string_num_totuple(self, s):
        """
        Split a ScanInfo line into its alphabetic part and its numeric part.

        Returns
        -------
        tuple
            (letters-only string or None, digits-only string or None).

        BUGFIX: the ``or None`` of the second element was previously inside the
        digit filter (``if c.isdigit() or None``), where it had no effect; it
        now mirrors the first element so an empty digit string becomes None.
        """
        return (
            "".join(c for c in s if c.isalpha()) or None,
            "".join(c for c in s if c.isdigit()) or None,
        )
+ """ + + def __init__(self): + super().__init__() + self.allowed_extensions = self.allowed_extensions + [".xrm"] + self.angles_from_filenames = True + self.metadata_projections = Metadata_SSRL62B_Raw_Projections() + self.metadata_references = Metadata_SSRL62B_Raw_References() + self.metadata = Metadata_SSRL62B_Raw( + self.metadata_projections, self.metadata_references + ) + + def import_data(self, Uploader): + self.import_metadata() + self.metadata_projections.set_extra_metadata(Uploader) + self.metadata_references.set_extra_metadata(Uploader) + self.metadata.filedir = self.metadata_projections.filedir + self.filedir = self.metadata.filedir + self.metadata.filepath = self.metadata.filedir / self.metadata.filename + self.metadata.save_metadata() + save_filedir_name = str(self.metadata_projections.metadata["energy_str"] + "eV") + self.import_savedir = self.metadata_projections.filedir / save_filedir_name + self.make_import_savedir(save_filedir_name) + self.import_filedir_projections(Uploader) + self.import_filedir_flats(Uploader) + self.filedir = self.import_savedir + projs, flats, darks = self.setup_normalize(Uploader) + Uploader.import_status_label.value = "Normalizing projections" + self._data = self.normalize_no_locations_no_average( + projs, flats, darks, compute=False + ) + self.data = self._data + hist, r, bins, percentile = self._dask_hist() + grp = self.hdf_key_norm + data_dict = { + self.hdf_key_norm_proj: self.data, + grp + self.hdf_key_bin_frequency: hist[0], + grp + self.hdf_key_bin_edges: hist[1], + grp + self.hdf_key_image_range: r, + grp + self.hdf_key_percentile: percentile, + } + self.dask_data_to_h5(data_dict, savedir=self.import_savedir) + self._dask_bin_centers(grp, write=True, savedir=self.import_savedir) + Uploader.import_status_label.value = "Downsampling data in a pyramid" + self.filedir = self.import_savedir + self._check_downsampled_data(label=Uploader.import_status_label) + self.metadata_projections.set_attributes_from_metadata(self) + 
self.metadata_prenorm = Metadata_SSRL62B_Prenorm()
+        self.metadata_prenorm.set_metadata(self)
+        self.metadata_prenorm.metadata[
+            "parent_metadata"
+        ] = self.metadata.metadata.copy()
+        if Uploader.save_tiff_on_import_checkbox.value:
+            # Bug fix: this branch referenced self.status_label (undefined on
+            # this class; every other path uses Uploader.import_status_label),
+            # an undefined local name "projections", and subscripted
+            # self.metadata (a Metadata object, not a dict). Record the flag
+            # on the prenorm metadata dict, as ALS832 does.
+            Uploader.import_status_label.value = "Saving projections as .tiff."
+            self.saved_as_tiff = True
+            self.save_normalized_as_tiff()
+            self.metadata_prenorm.metadata["saved_as_tiff"] = self.saved_as_tiff
+        self.metadata_prenorm.filedir = self.filedir
+        self.metadata_prenorm.filepath = self.filedir / self.metadata_prenorm.filename
+        self.metadata_prenorm.save_metadata()
+
+        self.hdf_file.close()
+
+    def import_metadata(self):
+        self.metadata = Metadata_SSRL62B_Raw(
+            self.metadata_projections, self.metadata_references
+        )
+
+    def import_metadata_projections(self, Uploader):
+        self.projections_filedir = Uploader.projections_metadata_filepath.parent
+        self.metadata_projections = Metadata_SSRL62B_Raw_Projections()
+        self.metadata_projections.filedir = (
+            Uploader.projections_metadata_filepath.parent
+        )
+        self.metadata_projections.filename = Uploader.projections_metadata_filepath.name
+        self.metadata_projections.filepath = Uploader.projections_metadata_filepath
+        self.metadata_projections.parse_raw_metadata()
+        self.metadata_projections.set_extra_metadata(Uploader)
+
+    def import_metadata_references(self, Uploader):
+        self.references_filedir = Uploader.references_metadata_filepath.parent
+        self.metadata_references = Metadata_SSRL62B_Raw_References()
+        self.metadata_references.filedir = Uploader.references_metadata_filepath.parent
+        self.metadata_references.filename = Uploader.references_metadata_filepath.name
+        self.metadata_references.filepath = Uploader.references_metadata_filepath
+        self.metadata_references.parse_raw_metadata()
+        self.metadata_references.set_extra_metadata(Uploader)
+
+    def import_filedir_all(self, Uploader):
+        pass
+
+    def import_filedir_projections(self, Uploader):
+        tifffiles = self.metadata_projections.metadata["filenames"]
+        tifffiles = 
[self.projections_filedir / file for file in tifffiles] + Uploader.upload_progress.value = 0 + Uploader.upload_progress.max = len(tifffiles) + Uploader.import_status_label.value = "Uploading projections" + Uploader.progress_output.clear_output() + with Uploader.progress_output: + display( + VBox( + [Uploader.upload_progress, Uploader.import_status_label], + layout=Layout(justify_content="center", align_items="center"), + ) + ) + + arr = [] + for file in tifffiles: + arr.append(tf.imread(file)) + Uploader.upload_progress.value += 1 + Uploader.import_status_label.value = "Converting to numpy array" + arr = np.array(arr) + arr = np.rot90(arr, axes=(1, 2)) + Uploader.import_status_label.value = "Converting to dask array" + arr = da.from_array(arr, chunks={0: "auto", 1: -1, 2: -1}) + Uploader.import_status_label.value = "Saving in normalized_projections.hdf5" + data_dict = {self.hdf_key_raw_proj: arr} + da.to_hdf5(self.import_savedir / self.normalized_projections_hdf_key, data_dict) + + def import_filedir_flats(self, Uploader): + tifffiles = self.metadata_references.metadata["filenames"] + tifffiles = [self.metadata_references.filedir / file for file in tifffiles] + Uploader.upload_progress.value = 0 + Uploader.upload_progress.max = len(tifffiles) + Uploader.import_status_label.value = "Uploading references" + arr = [] + for file in tifffiles: + arr.append(tf.imread(file)) + Uploader.upload_progress.value += 1 + Uploader.import_status_label.value = "Converting to numpy array" + arr = np.array(arr) + arr = np.rot90(arr, axes=(1, 2)) + Uploader.import_status_label.value = "Converting to dask array" + arr = da.from_array(arr, chunks={0: "auto", 1: -1, 2: -1}) + Uploader.import_status_label.value = "Saving in normalized_projections.hdf5" + data_dict = {self.hdf_key_raw_flats: arr} + da.to_hdf5(self.import_savedir / self.normalized_projections_hdf_key, data_dict) + + def import_filedir_darks(self, filedir): + pass + + def import_file_all(self, filepath): + pass + + def 
import_file_projections(self, filepath): + pass + + def import_file_flats(self, filepath): + pass + + def import_file_darks(self, filepath): + pass + + def setup_normalize(self, Uploader): + """ + Function to lazy load flats and projections as npy, convert to chunked dask + arrays for normalization. + + Returns + ------- + projs: dask array + Projections chunked by scan_info["NEXPOSURES"] + flats: dask array + References chunked by scan_info["REFNEXPOSURES"] + darks: dask array + Zeros array with the same image dimensions as flats + """ + self.flats = None + self._data = None + self.hdf_file = h5py.File( + self.import_savedir / self.normalized_projections_hdf_key, "a" + ) + self.flats = self.hdf_file[self.hdf_key_raw_flats] + self._data = self.hdf_file[self.hdf_key_raw_proj] + self.darks = np.zeros_like(self.flats[0])[np.newaxis, ...] + projs = da.from_array(self._data).astype(np.float32) + flats = da.from_array(self.flats).astype(np.float32) + darks = da.from_array(self.darks).astype(np.float32) + return projs, flats, darks + + +class RawProjectionsHDF5_ALS832(RawProjectionsBase): + """ + This class holds your projections data, metadata, and functions associated with + importing that data and metadata. + + For SSRL62C, this is a very complicated class. Because of your h5 data storage, + it is relatively more straightforward to import and normalize. + + You can overload the functions in subclasses if you have more complicated + import and normalization protocols for your data. 
+ """ + + def __init__(self): + super().__init__() + self.allowed_extensions = [".h5"] + self.metadata = Metadata_ALS_832_Raw() + + def import_filedir_all(self, filedir): + pass + + def import_filedir_projections(self, filedir): + pass + + def import_filedir_flats(self, filedir): + pass + + def import_filedir_darks(self, filedir): + pass + + def import_file_all(self, Uploader): + self.import_status_label = Uploader.import_status_label + self.tic = time.perf_counter() + self.filedir = Uploader.filedir + self.filename = Uploader.filename + self.filepath = self.filedir / self.filename + self.metadata = Uploader.reset_metadata_to() + self.metadata.load_metadata_h5(self.filepath) + self.metadata.set_attributes_from_metadata(self) + self.import_status_label.value = "Importing" + ( + self._data, + self.flats, + self.darks, + self.angles_rad, + ) = dxchange.exchange.read_aps_tomoscan_hdf5(self.filepath) + self.data = self._data + self.angles_deg = (180 / np.pi) * self.angles_rad + self.metadata.set_metadata(self) + self.metadata.save_metadata() + self.imported = True + self.import_savedir = self.filedir / str(self.filepath.stem) + # if the save directory already exists (you have previously uploaded this + # raw data), then it will create a datestamped folder. 
+        if self.import_savedir.exists():
+            now = datetime.datetime.now()
+            dt_str = now.strftime("%Y%m%d-%H%M-")
+            save_name = dt_str + str(self.filepath.stem)
+            self.import_savedir = pathlib.Path(self.filedir / save_name)
+            if self.import_savedir.exists():
+                dt_str = now.strftime("%Y%m%d-%H%M%S-")
+                save_name = dt_str + str(self.filepath.stem)
+                self.import_savedir = pathlib.Path(self.filedir / save_name)
+        self.import_savedir.mkdir()
+        self.import_status_label.value = "Normalizing"
+        self.normalize()
+        self.data = da.from_array(self.data, chunks={0: "auto", 1: -1, 2: -1})
+        self.import_status_label.value = "Saving projections as hdf"
+        self.save_data_and_metadata(Uploader)
+
+    def import_metadata(self, filepath=None):
+        if filepath is None:
+            filepath = self.filepath
+        self.metadata.load_metadata_h5(filepath)
+        self.metadata.set_attributes_from_metadata(self)
+
+    def import_file_projections(self, filepath):
+        tomo_grp = "/".join([exchange_base, "data"])
+        tomo = dxreader.read_hdf5(fname, tomo_grp, slc=(proj, sino), dtype=dtype)
+
+    def import_file_flats(self, filepath):
+        flat_grp = "/".join([exchange_base, "data_white"])
+        flat = dxreader.read_hdf5(fname, flat_grp, slc=(None, sino), dtype=dtype)
+
+    def import_file_darks(self, filepath):
+        dark_grp = "/".join([exchange_base, "data_dark"])
+        dark = dxreader.read_hdf5(fname, dark_grp, slc=(None, sino), dtype=dtype)
+
+    def import_file_angles(self, filepath):
+        theta_grp = "/".join([exchange_base, "theta"])
+        theta = dxreader.read_hdf5(fname, theta_grp, slc=None)
+
+    def save_normalized_metadata(self, import_time=None, parent_metadata=None):
+        metadata = Metadata_ALS_832_Prenorm()
+        metadata.filedir = self.filedir
+        # Bug fix: parent_metadata.copy() was previously called before the
+        # None check, so calling with the default parent_metadata=None raised
+        # AttributeError. Copy only when a parent was supplied.
+        if parent_metadata is not None:
+            metadata.metadata = parent_metadata.copy()
+            metadata.metadata["parent_metadata"] = parent_metadata.copy()
+        if import_time is not None:
+            metadata.metadata["import_time"] = import_time
+        metadata.set_metadata(self)
+        metadata.save_metadata()
+        return metadata
+
+    def 
save_data_and_metadata(self, Uploader): + """ + Saves current data and metadata in import_savedir. + """ + self.filedir = self.import_savedir + self._dask_hist_and_save_data() + self.saved_as_tiff = False + _metadata = self.metadata.metadata.copy() + if Uploader.save_tiff_on_import_checkbox.value: + Uploader.import_status_label.value = "Saving projections as .tiff." + self.saved_as_tiff = True + self.save_normalized_as_tiff() + self.metadata.metadata["saved_as_tiff"] = True + self.metadata.filedir = self.filedir + self.toc = time.perf_counter() + self.metadata = self.save_normalized_metadata(self.toc - self.tic, _metadata) + Uploader.import_status_label.value = "Checking for downsampled data." + self._check_downsampled_data(label=Uploader.import_status_label) + + +class RawProjectionsHDF5_APS(RawProjectionsBase): + """ + This class holds your projections data, metadata, and functions associated with + importing that data and metadata. + + For SSRL62C, this is a very complicated class. Because of your h5 data storage, + it is relatively more straightforward to import and normalize. + + You can overload the functions in subclasses if you have more complicated + import and normalization protocols for your data. 
+ """ + + def __init__(self): + super().__init__() + self.allowed_extensions = [".h5"] + self.metadata = Metadata_APS_Raw() + + def import_filedir_all(self, filedir): + pass + + def import_filedir_projections(self, filedir): + pass + + def import_filedir_flats(self, filedir): + pass + + def import_filedir_darks(self, filedir): + pass + + def import_file_all(self, Uploader): + self.import_status_label = Uploader.import_status_label + self.tic = time.perf_counter() + self.filedir = Uploader.filedir + self.filename = Uploader.filename + self.filepath = self.filedir / self.filename + self.metadata = Uploader.reset_metadata_to() + self.metadata.load_metadata_h5(self.filepath) + self.metadata.set_attributes_from_metadata(self) + self.import_status_label.value = "Importing" + self.metadata.set_attributes_from_metadata(self) + ( + self._data, + self.flats, + self.darks, + self.angles_rad, + ) = dxchange.exchange.read_aps_tomoscan_hdf5(self.filepath) + self.data = self._data + self.angles_deg = (180 / np.pi) * self.angles_rad + self.metadata.set_metadata(self) + self.metadata.save_metadata() + self.imported = True + self.import_savedir = self.filedir / str(self.filepath.stem) + # if the save directory already exists (you have previously uploaded this + # raw data), then it will create a datestamped folder. 
+ if self.import_savedir.exists(): + now = datetime.datetime.now() + dt_str = now.strftime("%Y%m%d-%H%M-") + save_name = dt_str + str(self.filepath.stem) + self.import_savedir = pathlib.Path(self.filedir / save_name) + if self.import_savedir.exists(): + dt_str = now.strftime("%Y%m%d-%H%M%S-") + save_name = dt_str + str(self.filepath.stem) + self.import_savedir = pathlib.Path(self.filedir / save_name) + self.import_savedir.mkdir() + self.import_status_label.value = "Normalizing" + self.normalize() + _metadata = self.metadata.metadata.copy() + self.import_status_label.value = "Saving projections as npy for faster IO" + self.filedir = self.import_savedir + self.save_normalized_as_npy() + self._check_downsampled_data() + self.toc = time.perf_counter() + self.metadata = self.save_normalized_metadata(self.toc - self.tic, _metadata) + + def import_metadata(self, filepath=None): + if filepath is None: + filepath = self.filepath + self.metadata.load_metadata_h5(filepath) + self.metadata.set_attributes_from_metadata(self) + + def import_file_projections(self, filepath): + tomo_grp = "/".join([exchange_base, "data"]) + tomo = dxreader.read_hdf5(fname, tomo_grp, slc=(proj, sino), dtype=dtype) + + def import_file_flats(self, filepath): + flat_grp = "/".join([exchange_base, "data_white"]) + flat = dxreader.read_hdf5(fname, flat_grp, slc=(None, sino), dtype=dtype) + + def import_file_darks(self, filepath): + dark_grp = "/".join([exchange_base, "data_dark"]) + dark = dxreader.read_hdf5(fname, dark_grp, slc=(None, sino), dtype=dtype) + + def import_file_angles(self, filepath): + theta_grp = "/".join([exchange_base, "theta"]) + theta = dxreader.read_hdf5(fname, theta_grp, slc=None) + + def save_normalized_metadata(self, import_time=None, parent_metadata=None): + metadata = Metadata_APS_Prenorm() + metadata.filedir = self.filedir + metadata.metadata = parent_metadata.copy() + if parent_metadata is not None: + metadata.metadata["parent_metadata"] = parent_metadata.copy() + if 
import_time is not None: + metadata.metadata["import_time"] = import_time + metadata.set_metadata(self) + metadata.save_metadata() + return metadata + + +class Metadata(ABC): + """ + Base class for all metadatas. + """ + + def __init__(self): + self.header_font_style = { + "font_size": "22px", + "font_weight": "bold", + "font_variant": "small-caps", + # "text_color": "#0F52BA", + } + self.table_label = Label(style=self.header_font_style) + self.metadata = {} + self.filedir = None + self.filename = None + self.filepath = None + + def save_metadata(self): + with open(self.filedir / self.filename, "w+") as f: + a = safe_serialize(self.metadata, f) + + def load_metadata(self): + with open(self.filepath) as f: + self.metadata = json.load(f) + + return self.metadata + + def set_parent_metadata(self, parent_metadata): + self.metadata["parent_metadata"] = parent_metadata + self.metadata["data_hierarchy_level"] = ( + parent_metadata["data_hierarchy_level"] + 1 + ) + + def create_metadata_box(self): + """ + Creates the box to be displayed on the frontend when importing data. Has both + a label and the metadata dataframe (stored in table_output). + + """ + self.metadata_to_DataFrame() + self.table_output = Output() + if self.dataframe is not None: + with self.table_output: + display(self.dataframe) + self.metadata_vbox = VBox( + [self.table_label, self.table_output], layout=Layout(align_items="center") + ) + + @staticmethod + def parse_metadata_type(filepath: pathlib.Path = None, metadata=None): + """ + Determines the type of metadata by looking at the "metadata_type" key in the + loaded dictionary. + + Parameters + ---------- + filepath: pathlib.Path + Filepath for the metadata. If this is not specified, metadata should be + specified + metadata: dict + A metadata dictionary with the "metadata_type" key. If this is not + specified, a filepath should be specified. + + Returns + ------- + A metadata instance with the metadata. 
+ + """ + if filepath is not None: + with open(filepath) as f: + metadata = json.load(f) + + if "metadata_type" not in metadata: + metadata["metadata_type"] = "SSRL62C_Normalized" + + # General Data + if metadata["metadata_type"] == "General_Normalized": + metadata_instance = Metadata_General_Prenorm() + + # SSRL Beamlines + if metadata["metadata_type"] == "SSRL62C_Normalized": + metadata_instance = Metadata_SSRL62C_Prenorm() + if metadata["metadata_type"] == "SSRL62C_Raw": + metadata_instance = Metadata_SSRL62C_Raw() + if metadata["metadata_type"] == "SSRL62B_Normalized": + metadata_instance = Metadata_SSRL62B_Prenorm() + if metadata["metadata_type"] == "SSRL62B_Raw": + metadata_instance = Metadata_SSRL62B_Raw() + + # ALS Beamlines + if metadata["metadata_type"] == "ALS832_Normalized": + metadata_instance = Metadata_ALS_832_Prenorm() + if metadata["metadata_type"] == "ALS832_Raw": + metadata_instance = Metadata_ALS_832_Raw() + + # Metadata through rest of processing pipeline + if metadata["metadata_type"] == "Prep": + metadata_instance = Metadata_Prep() + if metadata["metadata_type"] == "Align": + metadata_instance = Metadata_Align() + if metadata["metadata_type"] == "Recon": + metadata_instance = Metadata_Recon() + + if filepath is not None: + metadata_instance.filedir = filepath.parent + metadata_instance.filename = filepath.name + metadata_instance.filepath = filepath + + return metadata_instance + + @staticmethod + def get_metadata_hierarchy(filepath): + """ + Reads in a metadata file from filepath and determines its hierarchy. Generates + a list of `Metadata` instances, found by Metadata.parse_metadata_type. + + Parameters + ---------- + filepath: pathlike + Metadata file path. + + Returns + ------- + metadata_insts: list(`Metadata`) + List of metadata instances associated with the metadata file. 
+ """ + with open(filepath) as f: + metadata = json.load(f) + num_levels = metadata["data_hierarchy_level"] + metadata_insts = [] + for i in range(num_levels + 1): + metadata_insts.append( + Metadata.parse_metadata_type(metadata=metadata.copy()) + ) + metadata_insts[i].metadata = metadata.copy() + if "parent_metadata" in metadata: + metadata = metadata["parent_metadata"].copy() + return metadata_insts + + @abstractmethod + def set_metadata(self, projections): + """ + Sets metadata from projections attributes. + """ + ... + + @abstractmethod + def metadata_to_DataFrame(self): + """ + This will take the metadata that you have and turn it into a table for display + on the frontend. It is a little complicated, but I don't know pandas very well. + You will have "top_headers" which are the headers at the top of the table like + "Image Information". The subheaders are called "middle_headers": things like + the X Pixels, Y Pixels, and the number of angles. Then below each of the middle + headers, you have the data. The dimensions of each should match up properly + + This creates a dataframe and then s.set_table_styles() styles it. This styling + function is based on CSS, which I know very little about. You can make the + table as fancy as you want, but for now I just have a blue background header + and white lines dividing the major table sections. + """ + ... + + +class Metadata_General_Prenorm(Metadata): + """ + General prenormalized metadata. This will be created if you are importing a tiff + or tiff stack, or npy file that was not previously imported using TomoPyUI. 
+ """ + + def __init__(self): + super().__init__() + self.filename = "import_metadata.json" + self.metadata["metadata_type"] = "General_Normalized" + self.metadata["data_hierarchy_level"] = 0 + self.data_hierarchy_level = 0 + self.imported = False + self.table_label.value = "User Metadata" + + def set_metadata(self, projections): + pass + + def metadata_to_DataFrame(self): + # create headers and data for table + self.metadata["energy_str"] = f"{self.metadata['energy_float']:0.2f}" + px_size = self.metadata["pixel_size"] + px_units = self.metadata["pixel_units"] + en_units = self.metadata["energy_units"] + start_angle = self.metadata["start_angle"] + end_angle = self.metadata["end_angle"] + ang_res = self.metadata["angular_resolution"] + self.metadata["num_angles"] = int((end_angle - start_angle) / ang_res) + + self.metadata_list_for_table = [ + { + f"Energy ({en_units})": self.metadata["energy_str"], + "Start θ (°)": f"{start_angle:0.1f}", + "End θ (°)": f"{end_angle:0.1f}", + "Angular Resolution (°)": f"{ang_res:0.2f}", + }, + { + f"Pixel Size ({px_units})": f"{px_size:0.2f}", + "Binning": self.metadata["binning"], + "Num. θ (est)": self.metadata["num_angles"], + }, + ] + if "pxX" in self.metadata: + self.metadata_list_for_table[1]["X Pixels"] = self.metadata["pxX"] + self.metadata_list_for_table[1]["Y Pixels"] = self.metadata["pxY"] + self.metadata_list_for_table[1]["Num. 
θ"] = self.metadata["pxZ"] + self.make_angles_from_metadata() + + middle_headers = [[]] + data = [[]] + for i in range(len(self.metadata_list_for_table)): + middle_headers.append([key for key in self.metadata_list_for_table[i]]) + data.append( + [ + self.metadata_list_for_table[i][key] + for key in self.metadata_list_for_table[i] + ] + ) + data.pop(0) + middle_headers.pop(0) + top_headers = [["Acquisition Information"]] + top_headers.append(["Image Information"]) + + # create dataframe with the above settings + df = pd.DataFrame( + [data[0]], + columns=pd.MultiIndex.from_product([top_headers[0], middle_headers[0]]), + ) + for i in range(len(middle_headers)): + if i == 0: + continue + else: + newdf = pd.DataFrame( + [data[i]], + columns=pd.MultiIndex.from_product( + [top_headers[i], middle_headers[i]] + ), + ) + df = df.join(newdf) + + # set datatable styles + s = df.style.hide(axis="index") + s.set_table_styles( + { + # ("Acquisition Information", middle_headers[0][0]): [ + # {"selector": "td", "props": "border-left: 1px solid white"}, + # {"selector": "th", "props": "border-left: 1px solid white"}, + # ], + ("Image Information", middle_headers[1][0]): [ + {"selector": "td", "props": "border-left: 1px solid white"}, + {"selector": "th", "props": "border-left: 1px solid white"}, + ], + }, + overwrite=False, + ) + s.set_table_styles( + [ + {"selector": "th.col_heading", "props": "text-align: center;"}, + {"selector": "th.col_heading.level0", "props": "font-size: 1.2em;"}, + {"selector": "td", "props": "text-align: center;" "font-size: 1.2em;"}, + { + "selector": "th:not(.index_name)", + "props": "background-color: #0F52BA; color: white;", + }, + ], + overwrite=False, + ) + + self.dataframe = s + + def set_attributes_from_metadata_before_import(self, projections): + projections.pxX = self.metadata["pxX"] + projections.pxY = self.metadata["pxY"] + projections.pxZ = self.metadata["pxZ"] + projections.angles_rad = self.metadata["angles_rad"] + projections.angles_deg = 
self.metadata["angles_deg"] + projections.start_angle = self.metadata["start_angle"] + projections.end_angle = self.metadata["end_angle"] + projections.binning = self.metadata["binning"] + projections.energy_str = self.metadata["energy_str"] + projections.energy_float = self.metadata["energy_float"] + projections.energy = projections.energy_float + projections.energy_units = self.metadata["energy_units"] + projections.px_size = self.metadata["pixel_size"] + projections.pixel_units = self.metadata["pixel_units"] + + def set_metadata_from_attributes_after_import(self, projections): + self.metadata["normalized_projections_size_gb"] = projections.size_gb + self.metadata["normalized_projections_directory"] = str( + projections.import_savedir + ) + if "filedir_ds" in projections.__dict__: + self.metadata["downsampled_projections_directory"] = str( + projections.filedir_ds + ) + self.metadata["saved_as_tiff"] = projections.saved_as_tiff + self.metadata["num_angles"] = projections.data.shape[0] + self.metadata["pxX"] = projections.data.shape[2] + self.metadata["pxY"] = projections.data.shape[1] + self.metadata["pxZ"] = projections.data.shape[0] + + def set_attributes_from_metadata(self, projections): + projections.pxX = self.metadata["pxX"] + projections.pxY = self.metadata["pxY"] + projections.pxZ = self.metadata["pxZ"] + projections.start_angle = self.metadata["start_angle"] + projections.end_angle = self.metadata["end_angle"] + projections.binning = self.metadata["binning"] + projections.energy_str = self.metadata["energy_str"] + projections.energy_float = self.metadata["energy_float"] + projections.energy = projections.energy_float + projections.energy_units = self.metadata["energy_units"] + projections.px_size = self.metadata["pixel_size"] + projections.pixel_units = self.metadata["pixel_units"] + projections.size_gb = self.metadata["normalized_projections_size_gb"] + projections.import_savedir = pathlib.Path( + self.metadata["normalized_projections_directory"] + ) + 
if "downsampled_projections_directory" in self.metadata: + projections.filedir_ds = pathlib.Path( + self.metadata["downsampled_projections_directory"] + ) + projections.saved_as_tiff = self.metadata["saved_as_tiff"] + if "angles_rad" in self.metadata: + projections.angles_rad = self.metadata["angles_rad"] + projections.angles_deg = self.metadata["angles_deg"] + + def make_angles_from_metadata(self): + self.metadata["angles_rad"] = angle_maker( + self.metadata["pxZ"], + ang1=self.metadata["start_angle"], + ang2=self.metadata["end_angle"], + ) + self.metadata["angles_rad"] = list(self.metadata["angles_rad"]) + self.metadata["angles_deg"] = [ + x * 180 / np.pi for x in self.metadata["angles_rad"] + ] + + +class Metadata_SSRL62C_Raw(Metadata): + """ + Raw metadata from SSRL 6-2C. Will be created if you import a folder filled with + raw XRMs. + """ + + def __init__(self): + super().__init__() + self.filename = "raw_metadata.json" + self.metadata["metadata_type"] = "SSRL62C_Raw" + self.metadata["data_hierarchy_level"] = 0 + self.data_hierarchy_level = 0 + self.table_label.value = "SSRL 6-2C Raw Metadata" + + def set_attributes_from_metadata(self, projections): + pass + + def set_metadata(self, projections): + self.metadata["scan_info"] = copy.deepcopy(projections.scan_info) + self.metadata["scan_info"]["FILES"] = [ + str(file) for file in projections.scan_info["FILES"] + ] + self.metadata["scan_info_path"] = str(projections.scan_info_path) + self.metadata["run_script_path"] = str(projections.run_script_path) + self.metadata["flats_filenames"] = [ + str(file) for file in projections.flats_filenames + ] + self.metadata["projections_filenames"] = [ + str(file) for file in projections.data_filenames + ] + self.metadata["scan_type"] = projections.scan_type + self.metadata["scan_order"] = projections.scan_order + self.metadata["pxX"] = projections.pxX + self.metadata["pxY"] = projections.pxY + self.metadata["pxZ"] = projections.pxZ + self.metadata["num_angles"] = 
projections.pxZ + self.metadata["angles_rad"] = projections.angles_rad + self.metadata["angles_deg"] = projections.angles_deg + self.metadata["start_angle"] = float(projections.angles_deg[0]) + self.metadata["end_angle"] = float(projections.angles_deg[-1]) + self.metadata["binning"] = projections.binning + if isinstance(projections.scan_info["PROJECTION_METADATA"], list): + self.metadata["projections_exposure_time"] = projections.scan_info[ + "PROJECTION_METADATA" + ][0]["exposure_time"] + else: + self.metadata["projections_exposure_time"] = projections.scan_info[ + "PROJECTION_METADATA" + ]["exposure_time"] + if isinstance(projections.scan_info["FLAT_METADATA"], list): + self.metadata["references_exposure_time"] = projections.scan_info[ + "FLAT_METADATA" + ][0]["exposure_time"] + else: + self.metadata["references_exposure_time"] = projections.scan_info[ + "FLAT_METADATA" + ]["exposure_time"] + + self.metadata["all_raw_energies_float"] = projections.energies_list_float + self.metadata["all_raw_energies_str"] = projections.energies_list_str + self.metadata["all_raw_pixel_sizes"] = projections.raw_pixel_sizes + self.metadata[ + "pixel_size_from_scan_info" + ] = projections.pixel_size_from_metadata + self.metadata["energy_units"] = "eV" + self.metadata["pixel_units"] = "nm" + self.metadata["raw_projections_dtype"] = str(projections.raw_data_type) + self.metadata["raw_projections_directory"] = str( + projections.data_filenames[0].parent + ) + self.metadata["data_hierarchy_level"] = projections.data_hierarchy_level + + def metadata_to_DataFrame(self): + + # change metadata keys to be better looking + if self.metadata["scan_info"]["VERSION"] == 1: + keys = { + "ENERGY": "Energy", + "TOMO": "Tomo", + "MOSAIC": "Mosaic", + "MULTIEXPOSURE": "MultiExposure", + "NREPEATSCAN": "Repeat Scan", + "WAITNSECS": "Wait (s)", + "NEXPOSURES": "Num. Exposures", + "AVERAGEONTHEFLY": "Average On the Fly", + "REFNEXPOSURES": "Num. 
Ref Exposures", + "REFEVERYEXPOSURES": "Ref/Num Exposures", + "REFABBA": "Order", + "MOSAICUP": "Up", + "MOSAICDOWN": "Down", + "MOSAICLEFT": "Left", + "MOSAICRIGHT": "Right", + "MOSAICOVERLAP": "Overlap (%)", + "MOSAICCENTRALTILE": "Central Tile", + } + if self.metadata["scan_info"]["VERSION"] == 2: + keys = { + "ENERGY": "Energy", + "TOMO": "Tomo", + "MOSAIC": "Mosaic", + "MULTIEXPOSURE": "MultiExposure", + "NREPEATSCAN": "Repeat Scan", + "WAITNSECS": "Wait (s)", + "NEXPOSURES": "Num. Exposures", + "AVERAGEONTHEFLY": "Average On the Fly", + "IMAGESPERPROJECTION": "Images/Projection", + "REFNEXPOSURES": "Num. Ref Exposures", + "REFEVERYEXPOSURES": "Ref/Num Exposures", + "REFABBA": "Order", + "REFDESPECKLEAVERAGE": "Ref Despeckle Avg", + "APPLYREF": "Ref Applied", + "MOSAICUP": "Up", + "MOSAICDOWN": "Down", + "MOSAICLEFT": "Left", + "MOSAICRIGHT": "Right", + "MOSAICOVERLAP": "Overlap (%)", + "MOSAICCENTRALTILE": "Central Tile", + } + m = {keys[key]: self.metadata["scan_info"][key] for key in keys} + + if m["Order"] == 0: + m["Order"] = "ABAB" + else: + m["Order"] = "ABBA" + + # create headers and data for table + middle_headers = [] + middle_headers.append(["Energy", "Tomo", "Mosaic", "MultiExposure"]) + if self.metadata["scan_info"]["VERSION"] == 1: + middle_headers.append( + [ + "Repeat Scan", + "Wait (s)", + "Num. Exposures", + ] + ) + middle_headers.append(["Num. Ref Exposures", "Ref/Num Exposures", "Order"]) + if self.metadata["scan_info"]["VERSION"] == 2: + middle_headers.append( + [ + "Repeat Scan", + "Wait (s)", + "Num. Exposures", + "Images/Projection", + ] + ) + middle_headers.append( + [ + "Num. 
Ref Exposures", + "Ref/Num Exposures", + "Order", + "Ref Despeckle Avg", + ] + ) + middle_headers.append(["Up", "Down", "Left", "Right"]) + top_headers = [] + top_headers.append(["Layers"]) + top_headers.append(["Image Information"]) + top_headers.append(["Acquisition Information"]) + top_headers.append(["Reference Information"]) + top_headers.append(["Mosaic Information"]) + data = [ + [m[key] for key in middle_headers[i]] for i in range(len(middle_headers)) + ] + middle_headers.insert(1, ["X Pixels", "Y Pixels", "Num. θ"]) + data.insert( + 1, + [ + self.metadata["pxX"], + self.metadata["pxY"], + len(self.metadata["angles_rad"]), + ], + ) + + # create dataframe with the above settings + df = pd.DataFrame( + [data[0]], + columns=pd.MultiIndex.from_product([top_headers[0], middle_headers[0]]), + ) + for i in range(len(middle_headers)): + if i == 0: + continue + else: + newdf = pd.DataFrame( + [data[i]], + columns=pd.MultiIndex.from_product( + [top_headers[i], middle_headers[i]] + ), + ) + df = df.join(newdf) + + # set datatable styles + s = df.style.hide(axis="index") + s.set_table_styles( + { + ("Acquisition Information", "Repeat Scan"): [ + {"selector": "td", "props": "border-left: 1px solid white"}, + {"selector": "th", "props": "border-left: 1px solid white"}, + ], + ("Image Information", "X Pixels"): [ + {"selector": "td", "props": "border-left: 1px solid white"}, + {"selector": "th", "props": "border-left: 1px solid white"}, + ], + ("Reference Information", "Num. Ref Exposures"): [ + {"selector": "td", "props": "border-left: 1px solid white"}, + {"selector": "th", "props": "border-left: 1px solid white"}, + ], + ("Reference Information", "Num. 
Ref Exposures"): [ + {"selector": "td", "props": "border-left: 1px solid white"}, + {"selector": "th", "props": "border-left: 1px solid white"}, + ], + ("Mosaic Information", "Up"): [ + {"selector": "td", "props": "border-left: 1px solid white"}, + {"selector": "th", "props": "border-left: 1px solid white"}, + ], + }, + overwrite=False, + ) + s.set_table_styles( + [ + {"selector": "th.col_heading", "props": "text-align: center;"}, + {"selector": "th.col_heading.level0", "props": "font-size: 1.2em;"}, + {"selector": "td", "props": "text-align: center;" "font-size: 1.2em;"}, + { + "selector": "th:not(.index_name)", + "props": "background-color: #0F52BA; color: white;", + }, + ], + overwrite=False, + ) + + self.dataframe = s + + +class Metadata_SSRL62C_Prenorm(Metadata_SSRL62C_Raw): + """ + Metadata class for data from SSRL 6-2C that was normalized using TomoPyUI. + """ + + def __init__(self): + super().__init__() + self.filename = "import_metadata.json" + self.metadata["metadata_type"] = "SSRL62C_Normalized" + self.metadata["data_hierarchy_level"] = 1 + self.table_label.value = "SSRL 6-2C TomoPyUI-Imported Metadata" + + def set_metadata(self, projections): + super().set_metadata(projections) + metadata_to_remove = [ + "scan_info_path", + "run_script_path", + "scan_info", + "scan_type", + "scan_order", + "all_raw_energies_float", + "all_raw_energies_str", + "all_raw_pixel_sizes", + "pixel_size_from_scan_info", + "raw_projections_dtype", + ] + # removing unneeded things from parent raw + [ + self.metadata.pop(name) + for name in metadata_to_remove + if name in self.metadata + ] + self.metadata["flats_ind"] = projections.flats_ind + self.metadata["user_overwrite_energy"] = projections.user_overwrite_energy + self.metadata["energy_str"] = projections.energy_str + self.metadata["energy_float"] = projections.energy_float + self.metadata["pixel_size"] = projections.px_size + self.metadata["normalized_projections_dtype"] = str(np.dtype(np.float32)) + 
self.metadata["normalized_projections_size_gb"] = projections.size_gb + self.metadata["normalized_projections_directory"] = str( + projections.import_savedir + ) + self.metadata[ + "normalized_projections_filename" + ] = projections.normalized_projections_hdf_key + self.metadata["normalization_function"] = "dask" + self.metadata["saved_as_tiff"] = projections.saved_as_tiff + + def metadata_to_DataFrame(self): + # create headers and data for table + px_size = self.metadata["pixel_size"] + px_units = self.metadata["pixel_units"] + en_units = self.metadata["energy_units"] + start_angle = self.metadata["start_angle"] + end_angle = self.metadata["end_angle"] + if isinstance(self.metadata["projections_exposure_time"], list): + exp_time_proj = f"{self.metadata['projections_exposure_time'][0]:0.2f}" + else: + exp_time_proj = f"{self.metadata['projections_exposure_time']:0.2f}" + if isinstance(self.metadata["references_exposure_time"], list): + exp_time_ref = f"{self.metadata['references_exposure_time'][0]:0.2f}" + else: + exp_time_ref = f"{self.metadata['references_exposure_time']:0.2f}" + if self.metadata["user_overwrite_energy"]: + user_overwrite = "Yes" + else: + user_overwrite = "No" + if self.metadata["saved_as_tiff"]: + save_as_tiff = "Yes" + else: + save_as_tiff = "No" + self.metadata_list_for_table = [ + { + f"Energy ({en_units})": self.metadata["energy_str"], + f"Pixel Size ({px_units})": f"{px_size:0.2f}", + "Start θ (°)": f"{start_angle:0.1f}", + "End θ (°)": f"{end_angle:0.1f}", + # "Scan Type": self.metadata["scan_type"], + "Ref. Exp. Time": exp_time_ref, + "Proj. Exp. Time": exp_time_proj, + }, + { + "X Pixels": self.metadata["pxX"], + "Y Pixels": self.metadata["pxY"], + "Num. 
θ": self.metadata["num_angles"], + "Binning": self.metadata["binning"], + }, + { + "Energy Overwritten": user_overwrite, + ".tif Saved": save_as_tiff, + }, + ] + middle_headers = [[]] + data = [[]] + for i in range(len(self.metadata_list_for_table)): + middle_headers.append([key for key in self.metadata_list_for_table[i]]) + data.append( + [ + self.metadata_list_for_table[i][key] + for key in self.metadata_list_for_table[i] + ] + ) + data.pop(0) + middle_headers.pop(0) + top_headers = [["Acquisition Information"]] + top_headers.append(["Image Information"]) + top_headers.append(["Other Information"]) + + # create dataframe with the above settings + df = pd.DataFrame( + [data[0]], + columns=pd.MultiIndex.from_product([top_headers[0], middle_headers[0]]), + ) + for i in range(len(middle_headers)): + if i == 0: + continue + else: + newdf = pd.DataFrame( + [data[i]], + columns=pd.MultiIndex.from_product( + [top_headers[i], middle_headers[i]] + ), + ) + df = df.join(newdf) + + # set datatable styles + s = df.style.hide(axis="index") + s.set_table_styles( + { + ("Image Information", middle_headers[1][0]): [ + {"selector": "td", "props": "border-left: 1px solid white"}, + {"selector": "th", "props": "border-left: 1px solid white"}, + ], + ("Other Information", middle_headers[2][0]): [ + {"selector": "td", "props": "border-left: 1px solid white"}, + {"selector": "th", "props": "border-left: 1px solid white"}, + ], + }, + overwrite=False, + ) + s.set_table_styles( + [ + {"selector": "th.col_heading", "props": "text-align: center;"}, + {"selector": "th.col_heading.level0", "props": "font-size: 1.2em;"}, + {"selector": "td", "props": "text-align: center;" "font-size: 1.2em;"}, + { + "selector": "th:not(.index_name)", + "props": "background-color: #0F52BA; color: white;", + }, + ], + overwrite=False, + ) + + self.dataframe = s + + def set_attributes_from_metadata(self, projections): + projections.pxX = self.metadata["pxX"] + projections.pxY = self.metadata["pxY"] + 
projections.pxZ = self.metadata["pxZ"] + projections.angles_rad = self.metadata["angles_rad"] + projections.angles_deg = self.metadata["angles_deg"] + projections.start_angle = self.metadata["start_angle"] + projections.end_angle = self.metadata["end_angle"] + projections.binning = self.metadata["binning"] + projections.user_overwrite_energy = self.metadata["user_overwrite_energy"] + projections.energy_str = self.metadata["energy_str"] + projections.energy_float = self.metadata["energy_float"] + projections.energy_units = self.metadata["energy_units"] + projections.px_size = self.metadata["pixel_size"] + projections.pixel_units = self.metadata["pixel_units"] + # projections.import_savedir = pathlib.Path( + # self.metadata["normalized_projections_directory"] + # ) + if "downsampled_projections_directory" in self.metadata: + projections.filedir_ds = pathlib.Path( + self.metadata["downsampled_projections_directory"] + ) + if "flats_ind" in self.metadata: + projections.flats_ind = self.metadata["flats_ind"] + projections.saved_as_tiff = self.metadata["saved_as_tiff"] + + +class Metadata_SSRL62B_Raw_Projections(Metadata): + """ + Raw projections metadata from SSRL 6-2B. + """ + + summary_key = "Summary" + coords_default_key = r"Coords-Default/" + metadata_default_key = r"Metadata-Default/" + + def __init__(self): + super().__init__() + self.loaded_metadata = False # did we load metadata yet? 
no + self.filename = "raw_metadata.json" + self.metadata["metadata_type"] = "SSRL62B_Raw_Projections" + self.metadata["data_hierarchy_level"] = 0 + self.data_hierarchy_level = 0 + self.table_label.value = "SSRL 6-2B Raw Projections Metadata" + + def parse_raw_metadata(self): + self.load_metadata() + self.summary = self.imported_metadata["Summary"].copy() + self.metadata["acquisition_name"] = self.summary["Prefix"] + self.metadata["angular_resolution"] = self.summary["z-step_um"] / 1000 + self.metadata["pxZ"] = self.summary["Slices"] + self.metadata["num_angles"] = self.metadata["pxZ"] + self.metadata["pixel_type"] = self.summary["PixelType"] + self.meta_keys = [ + key for key in self.imported_metadata.keys() if "Metadata-Default" in key + ] + self.metadata["angles_deg"] = [ + self.imported_metadata[key]["ZPositionUm"] / 1000 for key in self.meta_keys + ] + self.metadata["angles_rad"] = [ + x * np.pi / 180 for x in self.metadata["angles_deg"] + ] + self.metadata["start_angle"] = self.metadata["angles_deg"][0] + self.metadata["end_angle"] = self.metadata["angles_deg"][-1] + self.metadata["exposure_times_ms"] = [ + self.imported_metadata[key]["Exposure-ms"] for key in self.meta_keys + ] + self.metadata["average_exposure_time"] = np.mean( + self.metadata["exposure_times_ms"] + ) + self.metadata["elapsed_times_ms"] = [ + self.imported_metadata[key]["ElapsedTime-ms"] for key in self.meta_keys + ] + self.metadata["received_times"] = [ + self.imported_metadata[key]["ReceivedTime"] for key in self.meta_keys + ] + self.metadata["filenames"] = [ + key.replace(r"Metadata-Default/", "") for key in self.meta_keys + ] + self.metadata["widths"] = [ + self.imported_metadata[key]["Width"] for key in self.meta_keys + ] + self.metadata["heights"] = [ + self.imported_metadata[key]["Height"] for key in self.meta_keys + ] + self.metadata["binnings"] = [ + self.imported_metadata[key]["Binning"] for key in self.meta_keys + ] + self.metadata["pxX"] = self.metadata["heights"][0] + 
self.metadata["pxY"] = self.metadata["widths"][0] + self.loaded_metadata = True + + def set_extra_metadata(self, Uploader): + self.metadata["energy_float"] = Uploader.energy_textbox.value + self.metadata["energy_str"] = f"{self.metadata['energy_float']:0.2f}" + self.metadata["energy_units"] = Uploader.energy_units_dropdown.value + self.metadata["pixel_size"] = Uploader.px_size_textbox.value + self.metadata["pixel_units"] = Uploader.px_units_dropdown.value + + def load_metadata(self): + with open(self.filepath) as f: + self.imported_metadata = json.load(f) + return self.imported_metadata + + def set_attributes_from_metadata(self, projections): + projections.binning = self.metadata["binnings"][0] + projections.num_angles = self.metadata["num_angles"] + projections.angles_deg = self.metadata["angles_deg"] + projections.angles_rad = self.metadata["angles_rad"] + projections.start_angle = self.metadata["start_angle"] + projections.end_angle = self.metadata["end_angle"] + projections.start_angle = self.metadata["start_angle"] + projections.pxZ = self.metadata["pxZ"] + projections.pxY = self.metadata["pxY"] + projections.pxX = self.metadata["pxX"] + projections.energy_float = self.metadata["energy_float"] + projections.energy_str = self.metadata["energy_str"] + projections.energy_units = self.metadata["energy_units"] + projections.pixel_size = self.metadata["pixel_size"] + projections.pixel_units = self.metadata["pixel_units"] + projections.projections_exposure_time = self.metadata["average_exposure_time"] + projections.acquisition_name = self.metadata["acquisition_name"] + + def set_metadata(self, projections): + pass + + def metadata_to_DataFrame(self): + # create headers and data for table + px_size = self.metadata["pixel_size"] + px_units = self.metadata["pixel_units"] + en_units = self.metadata["energy_units"] + start_angle = self.metadata["start_angle"] + end_angle = self.metadata["end_angle"] + self.metadata_list_for_table = [ + { + f"Energy ({en_units})": 
self.metadata["energy_str"], + f"Pixel Size ({px_units})": f"{px_size:0.2f}", + "Start θ (°)": f"{start_angle:0.1f}", + "End θ (°)": f"{end_angle:0.1f}", + "Exp. Time (ms)": f"{self.metadata['average_exposure_time']:0.2f}", + }, + { + "X Pixels": self.metadata["pxX"], + "Y Pixels": self.metadata["pxY"], + "Num. θ": self.metadata["num_angles"], + "Binning": self.metadata["binnings"][0], + }, + ] + middle_headers = [[]] + data = [[]] + for i in range(len(self.metadata_list_for_table)): + middle_headers.append([key for key in self.metadata_list_for_table[i]]) + data.append( + [ + self.metadata_list_for_table[i][key] + for key in self.metadata_list_for_table[i] + ] + ) + data.pop(0) + middle_headers.pop(0) + top_headers = [["Acquisition Information"]] + top_headers.append(["Image Information"]) + # top_headers.append(["Other Information"]) + + # create dataframe with the above settings + df = pd.DataFrame( + [data[0]], + columns=pd.MultiIndex.from_product([top_headers[0], middle_headers[0]]), + ) + for i in range(len(middle_headers)): + if i == 0: + continue + else: + newdf = pd.DataFrame( + [data[i]], + columns=pd.MultiIndex.from_product( + [top_headers[i], middle_headers[i]] + ), + ) + df = df.join(newdf) + + # set datatable styles + s = df.style.hide(axis="index") + s.set_table_styles( + { + ("Image Information", middle_headers[1][0]): [ + {"selector": "td", "props": "border-left: 1px solid white"}, + {"selector": "th", "props": "border-left: 1px solid white"}, + ], + # ("Other Information", middle_headers[2][0]): [ + # {"selector": "td", "props": "border-left: 1px solid white"}, + # {"selector": "th", "props": "border-left: 1px solid white"}, + # ], + }, + overwrite=False, + ) + s.set_table_styles( + [ + {"selector": "th.col_heading", "props": "text-align: center;"}, + {"selector": "th.col_heading.level0", "props": "font-size: 1.2em;"}, + {"selector": "td", "props": "text-align: center;" "font-size: 1.2em;"}, + { + "selector": "th:not(.index_name)", + "props": 
"background-color: #0F52BA; color: white;", + }, + ], + overwrite=False, + ) + + self.dataframe = s + + +class Metadata_SSRL62B_Raw_References(Metadata_SSRL62B_Raw_Projections): + """ + Raw reference metadata from SSRL 6-2B. + """ + + def __init__(self): + super().__init__() + self.filename = "raw_metadata.json" + self.metadata["metadata_type"] = "SSRL62B_Raw_References" + self.metadata["data_hierarchy_level"] = 0 + self.data_hierarchy_level = 0 + self.table_label.value = "SSRL 6-2B Raw References Metadata" + + def metadata_to_DataFrame(self): + # create headers and data for table + px_size = self.metadata["pixel_size"] + px_units = self.metadata["pixel_units"] + en_units = self.metadata["energy_units"] + start_angle = self.metadata["start_angle"] + end_angle = self.metadata["end_angle"] + self.metadata_list_for_table = [ + { + f"Energy ({en_units})": self.metadata["energy_str"], + f"Pixel Size ({px_units})": f"{px_size:0.2f}", + # "Start θ (°)": f"{start_angle:0.1f}", + # "End θ (°)": f"{end_angle:0.1f}", + "Exp. Time (ms)": f"{self.metadata['average_exposure_time']:0.2f}", + }, + { + "X Pixels": self.metadata["pxX"], + "Y Pixels": self.metadata["pxY"], + "Num. 
Refs": len(self.metadata["widths"]), + "Binning": self.metadata["binnings"][0], + }, + ] + middle_headers = [[]] + data = [[]] + for i in range(len(self.metadata_list_for_table)): + middle_headers.append([key for key in self.metadata_list_for_table[i]]) + data.append( + [ + self.metadata_list_for_table[i][key] + for key in self.metadata_list_for_table[i] + ] + ) + data.pop(0) + middle_headers.pop(0) + top_headers = [["Acquisition Information"]] + top_headers.append(["Image Information"]) + # top_headers.append(["Other Information"]) + + # create dataframe with the above settings + df = pd.DataFrame( + [data[0]], + columns=pd.MultiIndex.from_product([top_headers[0], middle_headers[0]]), + ) + for i in range(len(middle_headers)): + if i == 0: + continue + else: + newdf = pd.DataFrame( + [data[i]], + columns=pd.MultiIndex.from_product( + [top_headers[i], middle_headers[i]] + ), + ) + df = df.join(newdf) + + # set datatable styles + s = df.style.hide(axis="index") + s.set_table_styles( + { + ("Image Information", middle_headers[1][0]): [ + {"selector": "td", "props": "border-left: 1px solid white"}, + {"selector": "th", "props": "border-left: 1px solid white"}, + ], + # ("Other Information", middle_headers[2][0]): [ + # {"selector": "td", "props": "border-left: 1px solid white"}, + # {"selector": "th", "props": "border-left: 1px solid white"}, + # ], + }, + overwrite=False, + ) + s.set_table_styles( + [ + {"selector": "th.col_heading", "props": "text-align: center;"}, + {"selector": "th.col_heading.level0", "props": "font-size: 1.2em;"}, + {"selector": "td", "props": "text-align: center;" "font-size: 1.2em;"}, + { + "selector": "th:not(.index_name)", + "props": "background-color: #0F52BA; color: white;", + }, + ], + overwrite=False, + ) + + self.dataframe = s + + +class Metadata_SSRL62B_Raw(Metadata_SSRL62B_Raw_Projections): + """ + Raw reference metadata from SSRL 6-2B. 
+ """ + + def __init__(self, metadata_projections, metadata_references): + super().__init__() + self.metadata_projections = metadata_projections + self.metadata_references = metadata_references + self.metadata["projections_metadata"] = self.metadata_projections.metadata + self.metadata["references_metadata"] = self.metadata_references.metadata + self.filename = "raw_metadata.json" + self.metadata["metadata_type"] = "SSRL62B_Raw" + self.metadata["data_hierarchy_level"] = 0 + self.data_hierarchy_level = 0 + self.table_label.value = "SSRL 6-2B Raw Metadata" + + def metadata_to_DataFrame(self): + # create headers and data for table + self.metadata_projections.create_metadata_box() + self.metadata_references.create_metadata_box() + + def create_metadata_hbox(self): + """ + Creates the box to be displayed on the frontend when importing data. Has both + a label and the metadata dataframe (stored in table_output). + + """ + self.metadata_to_DataFrame() + self.table_output = Output() + if ( + self.metadata_projections.dataframe is not None + and self.metadata_references.dataframe is not None + ): + self.metadata_hbox = HBox( + [ + self.metadata_projections.metadata_vbox, + self.metadata_references.metadata_vbox, + ], + layout=Layout(justify_content="center"), + ) + + +class Metadata_SSRL62B_Prenorm(Metadata_SSRL62B_Raw_Projections): + """ + Metadata class for data from SSRL 6-2C that was normalized using TomoPyUI. 
+ """ + + def __init__(self): + super().__init__() + self.filename = "import_metadata.json" + self.metadata["metadata_type"] = "SSRL62B_Normalized" + self.metadata["data_hierarchy_level"] = 1 + self.table_label.value = "SSRL 6-2B TomoPyUI-Imported Metadata" + + def set_metadata(self, projections): + self.metadata["num_angles"] = projections.num_angles + self.metadata["angles_deg"] = projections.angles_deg + self.metadata["angles_rad"] = projections.angles_rad + self.metadata["start_angle"] = projections.start_angle + self.metadata["end_angle"] = projections.end_angle + self.metadata["start_angle"] = projections.start_angle + self.metadata["pxZ"] = projections.pxZ + self.metadata["pxY"] = projections.pxY + self.metadata["pxX"] = projections.pxX + self.metadata["energy_float"] = projections.energy_float + self.metadata["energy_str"] = projections.energy_str + self.metadata["energy_units"] = projections.energy_units + self.metadata["pixel_size"] = projections.pixel_size + self.metadata["pixel_units"] = projections.pixel_units + self.metadata["binning"] = projections.binning + self.metadata["average_exposure_time"] = projections.projections_exposure_time + self.metadata["acquisition_name"] = projections.acquisition_name + self.metadata["saved_as_tiff"] = projections.saved_as_tiff + + def metadata_to_DataFrame(self): + # create headers and data for table + px_size = self.metadata["pixel_size"] + px_units = self.metadata["pixel_units"] + en_units = self.metadata["energy_units"] + start_angle = self.metadata["start_angle"] + end_angle = self.metadata["end_angle"] + exp_time_proj = f"{self.metadata['average_exposure_time']:0.2f}" + if self.metadata["saved_as_tiff"]: + save_as_tiff = "Yes" + else: + save_as_tiff = "No" + self.metadata_list_for_table = [ + { + f"Energy ({en_units})": self.metadata["energy_str"], + f"Pixel Size ({px_units})": f"{px_size:0.2f}", + "Start θ (°)": f"{start_angle:0.1f}", + "End θ (°)": f"{end_angle:0.1f}", + # "Scan Type": 
self.metadata["scan_type"], + "Proj. Exp. Time": exp_time_proj, + }, + { + "X Pixels": self.metadata["pxX"], + "Y Pixels": self.metadata["pxY"], + "Num. θ": self.metadata["num_angles"], + "Binning": self.metadata["binning"], + }, + { + ".tif Saved": save_as_tiff, + }, + ] + middle_headers = [[]] + data = [[]] + for i in range(len(self.metadata_list_for_table)): + middle_headers.append([key for key in self.metadata_list_for_table[i]]) + data.append( + [ + self.metadata_list_for_table[i][key] + for key in self.metadata_list_for_table[i] + ] + ) + data.pop(0) + middle_headers.pop(0) + top_headers = [["Acquisition Information"]] + top_headers.append(["Image Information"]) + top_headers.append(["Other Information"]) + + # create dataframe with the above settings + df = pd.DataFrame( + [data[0]], + columns=pd.MultiIndex.from_product([top_headers[0], middle_headers[0]]), + ) + for i in range(len(middle_headers)): + if i == 0: + continue + else: + newdf = pd.DataFrame( + [data[i]], + columns=pd.MultiIndex.from_product( + [top_headers[i], middle_headers[i]] + ), + ) + df = df.join(newdf) + + # set datatable styles + s = df.style.hide(axis="index") + s.set_table_styles( + { + ("Image Information", middle_headers[1][0]): [ + {"selector": "td", "props": "border-left: 1px solid white"}, + {"selector": "th", "props": "border-left: 1px solid white"}, + ], + ("Other Information", middle_headers[2][0]): [ + {"selector": "td", "props": "border-left: 1px solid white"}, + {"selector": "th", "props": "border-left: 1px solid white"}, + ], + }, + overwrite=False, + ) + s.set_table_styles( + [ + {"selector": "th.col_heading", "props": "text-align: center;"}, + {"selector": "th.col_heading.level0", "props": "font-size: 1.2em;"}, + {"selector": "td", "props": "text-align: center;" "font-size: 1.2em;"}, + { + "selector": "th:not(.index_name)", + "props": "background-color: #0F52BA; color: white;", + }, + ], + overwrite=False, + ) + + self.dataframe = s + + def 
set_attributes_from_metadata(self, projections): + projections.pxX = self.metadata["pxX"] + projections.pxY = self.metadata["pxY"] + projections.pxZ = self.metadata["pxZ"] + projections.angles_rad = self.metadata["angles_rad"] + projections.angles_deg = self.metadata["angles_deg"] + projections.start_angle = self.metadata["start_angle"] + projections.end_angle = self.metadata["end_angle"] + projections.binning = self.metadata["binning"] + projections.energy_str = self.metadata["energy_str"] + projections.energy_float = self.metadata["energy_float"] + projections.energy_units = self.metadata["energy_units"] + projections.px_size = self.metadata["pixel_size"] + projections.pixel_units = self.metadata["pixel_units"] + projections.saved_as_tiff = self.metadata["saved_as_tiff"] + projections.num_angles = self.metadata["num_angles"] + projections.acquisition_name = self.metadata["acquisition_name"] + projections.exposure_time = self.metadata["average_exposure_time"] + + +class Metadata_ALS_832_Raw(Metadata): + def __init__(self): + super().__init__() + self.filename = "raw_metadata.json" + self.metadata["metadata_type"] = "ALS832_Raw" + self.metadata["data_hierarchy_level"] = 0 + self.table_label.value = "ALS 8.3.2 Metadata" + + def set_metadata(self, projections): + + self.metadata["numslices"] = projections.pxY + self.metadata["numrays"] = projections.pxX + self.metadata["num_angles"] = projections.pxZ + self.metadata["pxsize"] = projections.px_size + self.metadata["px_size_units"] = "cm" + self.metadata["propagation_dist"] = projections.propagation_dist + self.metadata["propagation_dist_units"] = "mm" + self.metadata["angularrange"] = projections.angular_range + self.metadata["kev"] = projections.energy + self.metadata["energy_units"] = "keV" + if projections.angles_deg is not None: + self.metadata["angles_deg"] = list(projections.angles_deg) + self.metadata["angles_rad"] = list(projections.angles_rad) + + def set_attributes_from_metadata(self, projections): + 
projections.pxY = self.metadata["numslices"] + projections.pxX = self.metadata["numrays"] + projections.pxZ = self.metadata["num_angles"] + projections.px_size = self.metadata["pxsize"] + projections.px_size_units = self.metadata["px_size_units"] + projections.propagation_dist = self.metadata["propagation_dist"] + projections.propagation_dist_units = "mm" + projections.angular_range = self.metadata["angularrange"] + projections.energy = self.metadata["kev"] + projections.units = self.metadata["energy_units"] + + def load_metadata_h5(self, h5_filepath): + self.filedir = h5_filepath.parent + self.filepath = h5_filepath + self.metadata["pxY"] = int( + dxchange.read_hdf5( + h5_filepath, "/measurement/instrument/detector/dimension_y" + )[0] + ) + self.metadata["numslices"] = self.metadata["pxY"] + self.metadata["pxX"] = int( + dxchange.read_hdf5( + h5_filepath, "/measurement/instrument/detector/dimension_x" + )[0] + ) + self.metadata["numrays"] = self.metadata["pxX"] + self.metadata["pxZ"] = int( + dxchange.read_hdf5(h5_filepath, "/process/acquisition/rotation/num_angles")[ + 0 + ] + ) + self.metadata["num_angles"] = self.metadata["pxZ"] + self.metadata["pxsize"] = ( + dxchange.read_hdf5( + h5_filepath, "/measurement/instrument/detector/pixel_size" + )[0] + / 10.0 + ) # /10 to convert units from mm to cm + self.metadata["px_size_units"] = "cm" + self.metadata["propagation_dist"] = dxchange.read_hdf5( + h5_filepath, + "/measurement/instrument/camera_motor_stack/setup/camera_distance", + )[1] + self.metadata["energy_float"] = ( + dxchange.read_hdf5( + h5_filepath, "/measurement/instrument/monochromator/energy" + )[0] + / 1000 + ) + self.metadata["kev"] = self.metadata["energy_float"] + self.metadata["energy_str"] = str(self.metadata["energy_float"]) + self.metadata["energy_units"] = "keV" + self.metadata["angularrange"] = dxchange.read_hdf5( + h5_filepath, "/process/acquisition/rotation/range" + )[0] + + def metadata_to_DataFrame(self): + + # create headers and data for 
class Metadata_ALS_832_Prenorm(Metadata_ALS_832_Raw):
    """
    Metadata class for ALS 8.3.2 data that was normalized using TomoPyUI.
    """

    def __init__(self):
        super().__init__()
        self.filename = "import_metadata.json"
        self.metadata["metadata_type"] = "ALS832_Normalized"
        self.metadata["data_hierarchy_level"] = 1
        self.data_hierarchy_level = self.metadata["data_hierarchy_level"]
        self.table_label.value = ""

    def set_metadata(self, projections):
        """Record metadata and re-label it as normalized (hierarchy level 1)."""
        super().set_metadata(projections)
        self.filename = "import_metadata.json"
        self.metadata["metadata_type"] = "ALS832_Normalized"
        self.metadata["data_hierarchy_level"] = 1

    def set_attributes_from_metadata(self, projections):
        """Copy stored metadata back onto *projections* attributes."""
        projections.pxY = self.metadata["numslices"]
        projections.pxX = self.metadata["numrays"]
        projections.pxZ = self.metadata["num_angles"]
        projections.px_size = self.metadata["pxsize"]
        projections.px_size_units = self.metadata["px_size_units"]
        # BUG FIX: "kev" is stored in keV, so eV = keV * 1000. The original
        # divided by 1000 while labeling the result "eV".
        projections.energy = self.metadata["kev"] * 1000
        projections.units = "eV"
        projections.angles_deg = self.metadata["angles_deg"]
        projections.angles_rad = self.metadata["angles_rad"]
        projections.angle_start = projections.angles_rad[0]
        projections.angle_end = projections.angles_rad[-1]

    def metadata_to_DataFrame(self):
        # The normalized table would duplicate the raw table; display nothing.
        self.dataframe = None

    def create_metadata_box(self):
        """
        Method overloaded because the metadata table is the same as the superclass.
        This avoids a space between tables during display.
        """
        self.metadata_vbox = Output()
+ def __init__(self): + super().__init__() + self.filename = "raw_metadata.json" + self.metadata["metadata_type"] = "APS_Raw" + self.metadata["data_hierarchy_level"] = 0 + self.table_label.value = "APS Metadata" + + # hdf file key definitions + self.energy_key = 'measurement_instrument_monochromator_energy' + self.resolution_key = 'measurement_instrument_detection_system_objective_resolution' + self.rotation_start_key = 'process_acquisition_rotation_start' + self.angle_step_key = 'process_acquisition_rotation_step' + self.num_angle_key = 'process_acquisition_rotation_num_angles' + self.width_key = 'measurement_instrument_detector_array_size_x' + self.height_key = 'measurement_instrument_detector_array_size_y' + self.camera_distance_key = 'measurement_instrument_detector_motor_stack_setup_z' + + def set_metadata(self, projections): + """ + Sets metadata from the APS h5 filetype + """ + self.metadata["numslices"] = projections.pxY + self.metadata["numrays"] = projections.pxX + self.metadata["num_angles"] = projections.pxZ + self.metadata["pxsize"] = projections.px_size + self.metadata["px_size_units"] = "cm" + self.metadata["propagation_dist"] = projections.propagation_dist + self.metadata["propagation_dist_units"] = "mm" + self.metadata["angularrange"] = projections.angular_range + self.metadata["kev"] = projections.energy + self.metadata["energy_units"] = "keV" + if projections.angles_deg is not None: + self.metadata["angles_deg"] = list(projections.angles_deg) + self.metadata["angles_rad"] = list(projections.angles_rad) + + def set_attributes_from_metadata(self, projections): + projections.pxY = self.metadata["numslices"] + projections.pxX = self.metadata["numrays"] + projections.pxZ = self.metadata["num_angles"] + projections.px_size = self.metadata["pxsize"] + projections.px_size_units = self.metadata["px_size_units"] + projections.propagation_dist = self.metadata["propagation_dist"] + projections.propagation_dist_units = "mm" + projections.angular_range = 
self.metadata["angularrange"] + projections.energy = self.metadata["kev"] + projections.units = self.metadata["energy_units"] + + def load_metadata_h5(self, h5_filepath): + """ + Loads in metadata from h5 file. You can probably use your dxchange function + to read all the metadata in at once. Not sure how it works for you. + + The keys in the self.metadata dictionary can be whatever you want, as long as + your set_attributes_from_metadata function above sets the values correctly. + """ + # set metadata filepath to the filepath above + self.filedir = h5_filepath.parent + self.filepath = h5_filepath + + # Here you will set your metadata. I have left these here from the ALS metadata + # class for reference. Some things are not inside the metadata (i.e. + # "energy_units") that I set manually. + + + _, meta = dxchange.read_hdf_meta(h5_filepath) + + self.metadata["pxY"] = int(meta[self.height_key][0]) + self.metadata["numslices"] = self.metadata["pxY"] + self.metadata["pxX"] = int(meta[self.width_key][0]) + self.metadata["numrays"] = self.metadata["pxX"] + self.metadata["pxZ"] = int(meta[self.num_angle_key][0]) + self.metadata["num_angles"] = self.metadata["pxZ"] + self.metadata["pxsize"] = float(meta[self.resolution_key][0]) + self.metadata["px_size_units"] = meta[self.resolution_key][1] + self.metadata["propagation_dist"] = float(meta[self.camera_distance_key][0]) + self.metadata["propagation_dist_units"] = float(meta[self.camera_distance_key][1]) + self.metadata["energy_float"] = float(meta[self.energy_key][0]) + self.metadata["kev"] = self.metadata["energy_float"] + self.metadata["energy_str"] = str(meta[self.energy_key][0]) + self.metadata["energy_units"] = meta[self.energy_key][1] + self.metadata["angularrange"] = float(meta[self.angle_step_key][0]) * float(meta[self.num_angle_key][0]) + + def metadata_to_DataFrame(self): + + # create headers and data for table + top_headers = [] + middle_headers = [] + data = [] + # Image information + top_headers.append(["Image 
Information"]) + middle_headers.append(["X Pixels", "Y Pixels", "Num. θ"]) + data.append( + [ + self.metadata["numrays"], + self.metadata["numslices"], + self.metadata["num_angles"], + ] + ) + + top_headers.append(["Experiment Settings"]) + middle_headers.append( + ["Energy (keV)", "Propagation Distance (mm)", "Angular range (deg)"] + ) + data.append( + [ + self.metadata["kev"], + self.metadata["propagation_dist"], + self.metadata["angularrange"], + ] + ) + + # create dataframe with the above settings + df = pd.DataFrame( + [data[0]], + columns=pd.MultiIndex.from_product([top_headers[0], middle_headers[0]]), + ) + for i in range(len(middle_headers)): + if i == 0: + continue + else: + newdf = pd.DataFrame( + [data[i]], + columns=pd.MultiIndex.from_product( + [top_headers[i], middle_headers[i]] + ), + ) + df = df.join(newdf) + + # set datatable styles + s = df.style.hide(axis="index") + s.set_table_styles( + { + ("Experiment Settings", "Energy (keV)"): [ + {"selector": "td", "props": "border-left: 1px solid white"}, + {"selector": "th", "props": "border-left: 1px solid white"}, + ], + }, + overwrite=False, + ) + + s.set_table_styles( + [ + {"selector": "th.col_heading", "props": "text-align: center;"}, + {"selector": "th.col_heading.level0", "props": "font-size: 1.2em;"}, + {"selector": "td", "props": "text-align: center;" "font-size: 1.2em;"}, + { + "selector": "th:not(.index_name)", + "props": "background-color: #0F52BA; color: white;", + }, + ], + overwrite=False, + ) + + self.dataframe = s + + +class Metadata_APS_Prenorm(Metadata_APS_Raw): + """ + Prenormalized metadata class. The table produced by this function may look nearly + the same for you. For the SSRL version, it looks very different because there is a + lot of excess information that I store in the SSRL raw metadata file. + + It is important to have this because "import_metadata.json" will be stored in a + subfolder of the parent, raw data. 
+ + Because the APS prenormalized metadata table looks identical to the raw metadata + table, I overloaded the create_metadata_box() function to be just an Output widget. + + You can get as fancy as you want with this. + + # Francesco: you will need to edit here. + """ + + def __init__(self): + super().__init__() + self.filename = "import_metadata.json" + self.metadata["metadata_type"] = "APS_Normalized" + self.metadata["data_hierarchy_level"] = 1 + self.data_hierarchy_level = self.metadata["data_hierarchy_level"] + self.table_label.value = "" + + def set_metadata(self, projections): + super().set_metadata(projections) + self.filename = "import_metadata.json" + self.metadata["metadata_type"] = "APS_Normalized" + self.metadata["data_hierarchy_level"] = 1 + + def set_attributes_from_metadata(self, projections): + projections.pxY = self.metadata["numslices"] + projections.pxX = self.metadata["numrays"] + projections.pxZ = self.metadata["num_angles"] + projections.px_size = self.metadata["pxsize"] + projections.px_size_units = self.metadata["px_size_units"] + projections.energy = self.metadata["kev"] / 1000 + projections.units = "eV" + projections.angles_deg = self.metadata["angles_deg"] + projections.angles_rad = self.metadata["angles_rad"] + projections.angle_start = projections.angles_rad[0] + projections.angle_end = projections.angles_rad[-1] + + def metadata_to_DataFrame(self): + self.dataframe = None + + def create_metadata_box(self): + """ + Method overloaded because the metadata table is the same as the superclass. + This avoids a space between tables during display. 
+ """ + self.metadata_vbox = Output() + + +class Metadata_Prep(Metadata): + def __init__(self): + super().__init__() + self.table_label.value = "Preprocessing Methods" + self.prep_list_label_style = { + "font_size": "16px", + "font_weight": "bold", + "font_variant": "small-caps", + # "text_color": "#0F52BA", + } + + def set_metadata(self, Prep): + self.metadata["metadata_type"] = "Prep" + self.filename = "prep_metadata.json" + self.parent_metadata = Prep.Import.projections.metadata + self.metadata["parent_metadata"] = self.parent_metadata.metadata + if "data_hierarchy_level" in self.parent_metadata.metadata: + self.metadata["data_hierarchy_level"] = ( + self.parent_metadata.metadata["data_hierarchy_level"] + 1 + ) + else: + self.metadata["data_hierarchy_level"] = 2 + self.metadata["prep_list"] = [ + (x[1].method_name, x[1].opts) for x in Prep.prep_list + ] + self.table_label.value = "Preprocessing Metadata" + + def metadata_to_DataFrame(self): + self.dataframe = None + + def create_metadata_box(self): + display_str = [x[0] + " → " for x in self.metadata["prep_list"][:-1]] + display_str = "".join(display_str + [self.metadata["prep_list"][-1][0]]) + + self.prep_list_label = Label(display_str, style=self.prep_list_label_style) + self.metadata_vbox = VBox( + [self.table_label, self.prep_list_label], + layout=Layout(align_items="center"), + ) + + def set_attributes_from_metadata(self, projections): + pass + + +class Metadata_Align(Metadata): + """ + Works with both Align and RunAlign instances. 
+ """ + + def __init__(self): + super().__init__() + self.filename = "alignment_metadata.json" + self.metadata["opts"] = {} + self.metadata["methods"] = {} + self.metadata["save_opts"] = {} + self.table_label.value = "Alignment Metadata" + + def set_metadata(self, Align): + self.metadata["metadata_type"] = "Align" + self.metadata["opts"]["downsample"] = Align.downsample + self.metadata["opts"]["ds_factor"] = int(Align.ds_factor) + self.metadata["opts"]["pyramid_level"] = Align.pyramid_level + self.metadata["opts"]["num_iter"] = Align.num_iter + self.metadata["use_multiple_centers"] = Align.use_multiple_centers + if self.metadata["use_multiple_centers"] and Align.Center.reg is not None: + self.metadata["opts"]["center"] = Align.Center.reg_centers + else: + self.metadata["opts"]["center"] = Align.center + self.metadata["opts"]["pad"] = ( + Align.padding_x, + Align.padding_y, + ) + self.metadata["opts"]["extra_options"] = Align.extra_options + self.metadata["methods"] = Align.methods_opts + self.metadata["save_opts"] = Align.save_opts + self.metadata["px_range_x"] = Align.altered_viewer.px_range_x + self.metadata["px_range_y"] = Align.altered_viewer.px_range_y + self.metadata["parent_filedir"] = Align.projections.filedir + self.metadata["parent_filename"] = Align.projections.filename + self.metadata["copy_hists_from_parent"] = Align.copy_hists + self.metadata["angles_rad"] = list(Align.projections.angles_rad) + self.metadata["angles_deg"] = list(Align.projections.angles_deg) + self.metadata["angle_start"] = Align.projections.angles_deg[0] + self.metadata["angle_end"] = Align.projections.angles_deg[-1] + self.set_metadata_obj_specific(Align) + + def set_metadata_obj_specific(self, Align): + self.metadata["opts"][ + "shift_full_dataset_after" + ] = Align.shift_full_dataset_after + self.metadata["opts"]["upsample_factor"] = Align.upsample_factor + self.metadata["opts"]["pre_alignment_iters"] = Align.pre_alignment_iters + self.metadata["use_subset_correlation"] = 
Align.use_subset_correlation + self.metadata["subset_x"] = Align.altered_viewer.subset_x + self.metadata["subset_y"] = Align.altered_viewer.subset_y + self.metadata["opts"]["num_batches"] = Align.num_batches + + def metadata_to_DataFrame(self): + metadata_frame = {} + time, title = parse_printed_time(self.metadata["analysis_time"]) + extra_headers = [ + "Prj X Range", + "Prj Y Range", + "Start Angle", + "End Angle", + title, + ] + metadata_frame["Headers"] = list(self.metadata["opts"].keys()) + center_idx = [ + i for i, key in enumerate(metadata_frame["Headers"]) if key == "center" + ][0] + metadata_frame["Headers"] = [ + metadata_frame["Headers"][i] + .replace("_", " ") + .title() + .replace("Num", "No.") + for i, key in enumerate(metadata_frame["Headers"]) if key != "pyramid_level" + ] + metadata_frame["Headers"] = metadata_frame["Headers"] + extra_headers + extra_values = [ + self.metadata["px_range_x"], + self.metadata["px_range_y"], + self.metadata["angle_start"], + self.metadata["angle_end"], + time, + ] + extra_values = [str(extra_values[i]) for i in range(len(extra_values))] + metadata_frame["Values"] = [ + str(self.metadata["opts"][key]) for key in self.metadata["opts"] if key != "pyramid_level" + ] + extra_values + if "use_multiple_centers" in self.metadata: + if self.metadata["use_multiple_centers"]: + metadata_frame["Values"][center_idx] = "Multiple" + metadata_frame = { + metadata_frame["Headers"][i]: metadata_frame["Values"][i] + for i in range(len(metadata_frame["Headers"])) + } + sr = pd.Series(metadata_frame) + df = pd.DataFrame(sr).transpose() + s = df.style.hide(axis="index") + s.set_table_styles( + [ + {"selector": "th.col_heading", "props": "text-align: center;"}, + {"selector": "th.col_heading.level0", "props": "font-size: 1.2em;"}, + {"selector": "td", "props": "text-align: center;" "font-size: 1.2em; "}, + { + "selector": "th:not(.index_name)", + "props": "background-color: #0F52BA; color: white;", + }, + ], + overwrite=False, + ) + + 
self.dataframe = s + + def set_attributes_from_metadata(self, Align): + Align.downsample = self.metadata["opts"]["downsample"] + if "ds_factor" in self.metadata["opts"]: + Align.ds_factor = self.metadata["opts"]["ds_factor"] + if "downsample_factor" in self.metadata["opts"]: + Align.ds_factor = self.metadata["opts"]["downsample_factor"] + if "pyramid_level" in self.metadata["opts"]: + Align.pyramid_level = self.metadata["opts"]["pyramid_level"] + if "copy_hists_from_parent" in self.metadata: + Align.copy_hists = self.metadata["copy_hists_from_parent"] + Align.num_iter = self.metadata["opts"]["num_iter"] + Align.center = self.metadata["opts"]["center"] + (Align.padding_x, Align.padding_y) = self.metadata["opts"]["pad"] + Align.pad = (Align.padding_x, Align.padding_y) + Align.extra_options = self.metadata["opts"]["extra_options"] + Align.methods_opts = self.metadata["methods"] + Align.save_opts = self.metadata["save_opts"] + if "use_multiple_centers" not in self.metadata: + Align.use_multiple_centers = False + else: + Align.use_multiple_centers = self.metadata["use_multiple_centers"] + if "px_range_x" in self.metadata.keys(): + Align.px_range_x = self.metadata["px_range_x"] + Align.px_range_y = self.metadata["px_range_y"] + else: + Align.px_range_x = self.metadata["pixel_range_x"] + Align.px_range_y = self.metadata["pixel_range_y"] + self.set_attributes_object_specific(Align) + + def set_attributes_object_specific(self, Align): + if "shift_full_dataset_after" in self.metadata["opts"]: + Align.shift_full_dataset_after = self.metadata["opts"][ + "shift_full_dataset_after" + ] + Align.upsample_factor = self.metadata["opts"]["upsample_factor"] + Align.pre_alignment_iters = self.metadata["opts"]["pre_alignment_iters"] + Align.subset_x = self.metadata["subset_x"] + Align.subset_y = self.metadata["subset_y"] + Align.use_subset_correlation = self.metadata["use_subset_correlation"] + Align.num_batches = self.metadata["opts"]["num_batches"] + + +class 
Metadata_Recon(Metadata_Align): + def set_metadata(self, Recon): + super().set_metadata(Recon) + self.metadata["metadata_type"] = "Recon" + self.filename = "recon_metadata.json" + self.table_label.value = "Reconstruction Metadata" + + def set_metadata_obj_specific(self, Recon): + pass + + def set_attributes_from_metadata(self, Recon): + super().set_attributes_from_metadata(Recon) + + def set_attributes_object_specific(self, Recon): + pass + + +# https://stackoverflow.com/questions/ +# 51674222/how-to-make-json-dumps-in-python-ignore-a-non-serializable-field +def safe_serialize(obj, f): + default = lambda o: f"<>" + return json.dump(obj, f, default=default, indent=4) + + +def parse_printed_time(timedict): + if timedict["hours"] < 1: + if timedict["minutes"] < 1: + time = timedict["seconds"] + title = "Time (s)" + else: + time = timedict["minutes"] + title = "Time (min)" + else: + time = timedict["hours"] + title = "Time (h)" + + time = f"{time:.1f}" + return time, title + + +def rescale_parallel(i, images=None, ds_factor_list=None): + return rescale(images[i], (ds_factor_list[1], ds_factor_list[2])) + + +def rescale_parallel_pool(n, images, ds_factor_list): + with mp.Pool() as pool: + rescale_partial = partial( + rescale_parallel, images=images, ds_factor_list=ds_factor_list + ) + return pool.map(rescale_partial, range(n)) + + +# ARCHIVE: + +# proj_ind = [ +# True if "ref_" not in file.name else False for file in collect +# ] +# flats_ind_positions = [i for i, val in enumerate(self.flats_ind) if val][ +# :: self.metadata["REFNEXPOSURES"] +# ] +# self.flats_ind = [ +# j for j in flats_ind_positions for i in range(self.metadata["REFNEXPOSURES"]) +# ] + + +# # Groups each set of references and each set of projections together. Unused. 
+# def group_from_run_script(self): +# all_collections = [[]] +# energies = [[]] +# with open(self.run_script_path, "r") as f: +# for line in f.readlines(): +# if line.startswith("sete "): +# energies.append(f"{float(line[5:]):.2f}") +# all_collections.append([]) +# elif line.startswith("collect "): +# filename = line[8:].strip() +# all_collections[-1].append(self.run_script_path.parent / filename) +# all_collections.pop(0) +# energies.pop(0) +# for energy, collect in zip(energies, all_collections): +# if energy not in self.selected_energies: +# continue +# else: +# # getting all flats/projections +# ref_ind = [True if "ref_" in file.name else False for file in collect] +# i = 0 +# copy_collect = collect.copy() +# for pos, file in enumerate(copy_collect): +# if "ref_" in file.name: +# if i == 0: +# i = 1 +# elif i == 1: +# copy_collect[pos] = 1 +# elif "ref_" not in file.name: +# i = 0 +# copy_collect = [value for value in copy_collect if value != 1] +# ref_ind = [ +# True if "ref_" in file.name else False for file in copy_collect +# ] +# ref_ind = [i for i in range(len(ref_ind)) if ref_ind[i]] +# self.ref_ind = ref_ind + +# proj_ind = [ +# True if "ref_" not in file.name else False for file in collect +# ] +# self.flats_filenames = [ +# file.parent / file.name for file in collect if "ref_" in file.name +# ] +# self.data_filenames = [ +# file.parent / file.name +# for file in collect +# if "ref_" not in file.name +# ] +# # # intitializing switch statements +# files_grouped = [[]] +# file_type = ["reference"] +# i = 0 +# adding_refs = True +# adding_projs = False +# for num, collection in enumerate(collect): +# if ref_ind[num] and adding_refs: +# files_grouped[-1].append(collection) +# elif proj_ind[num] and ref_ind[num - 1]: +# adding_refs = False +# adding_projs = True +# i = 0 +# files_grouped.append([]) +# files_grouped[-1].append(collection) +# file_type.append("projection") +# elif proj_ind[num - 1] and ref_ind[num]: +# adding_refs = True +# adding_projs = 
False +# i = 0 +# files_grouped.append([]) +# files_grouped[-1].append(collection) +# file_type.append("reference") +# elif adding_projs and i < self.scan_info["NEXPOSURES"] - 1: +# i += 1 +# files_grouped[-1].append(collection) +# else: +# i = 0 +# files_grouped.append([]) +# file_type.append("projection") + +# return files_grouped, file_type + + +# def get_img_shape(self, extension=None): +# """ +# Gets the image shape of a tiff or npy with lazy loading. +# """ + +# if self.extension == ".tif" or self.extension == ".tiff": +# allowed_extensions = [".tiff", ".tif"] +# file_list = [ +# pathlib.PurePath(f) for f in os.scandir(self.filedir) if not f.is_dir() +# ] +# tiff_file_list = [ +# file.name +# for file in file_list +# if any(x in file.name for x in self.allowed_extensions) +# ] +# tiff_count_in_filedir = len(tiff_file_list) +# with tf.TiffFile(self.filepath) as tif: +# # if you select a file instead of a file path, it will try to +# # bring in the full filedir +# if tiff_count_in_filedir > 50: +# sizeX = tif.pages[0].tags["ImageWidth"].value +# sizeY = tif.pages[0].tags["ImageLength"].value +# sizeZ = tiff_count_in_filedir # can maybe use this later +# else: +# imagesize = tif.pages[0].tags["ImageDescription"] +# size = json.loads(imagesize.value)["shape"] +# sizeZ = size[0] +# sizeY = size[1] +# sizeX = size[2] + +# elif self.extension == ".npy": +# size = np.load(self.filepath, mmap_mode="r").shape +# sizeZ = size[0] +# sizeY = size[1] +# sizeX = size[2] + +# return (sizeZ, sizeY, sizeX) diff --git a/tomopyui/backend/runanalysis.py b/tomopyui/backend/runanalysis.py new file mode 100644 index 0000000..a11bc05 --- /dev/null +++ b/tomopyui/backend/runanalysis.py @@ -0,0 +1,442 @@ +import datetime +import json +import os +import tifffile as tf +import numpy as np +import pathlib +import tomopy +import matplotlib.pyplot as plt +import time + +from abc import ABC, abstractmethod +from copy import copy, deepcopy +from time import perf_counter +from 
skimage.transform import rescale # look for better option +from tomopy.prep.alignment import align_joint as align_joint_tomopy +from tomopyui.backend.util.padding import * +from tomopyui._sharedvars import * +from tomopyui.backend.io import Metadata_Align, Metadata_Recon, Projections_Child +from tomopy.recon import algorithm as tomopy_algorithm +from tomopy.misc.corr import circ_mask +from tomopy.recon import wrappers +from scipy.stats import linregress + +# TODO: make this global +from tomopyui.widgets.helpers import import_module_set_env + +cuda_import_dict = {"cupy": "cuda_enabled"} +import_module_set_env(cuda_import_dict) +if os.environ["cuda_enabled"] == "True": + import astra + import tomopyui.tomocupy.recon.algorithm as tomocupy_algorithm + import cupy as cp + from ..tomocupy.prep.alignment import align_joint as align_joint_cupy + from ..tomocupy.prep.alignment import shift_prj_cp + from tomopyui.widgets.prep import shift_projections + + +class RunAnalysisBase(ABC): + """ + Base class for alignment and reconstruction objects. + """ + + def __init__(self, analysis_parent): + self.recon = None + self.skip_mk_wd_subdir = False + self.analysis_parent = analysis_parent + self.parent_projections = analysis_parent.projections + self.projections = Projections_Child(analysis_parent.projections) + self.metadata.set_metadata(analysis_parent) + self.metadata.set_attributes_from_metadata(self) + self.wd_parent = self.metadata.metadata["parent_filedir"] + self.metadata.metadata["parent_filedir"] = str( + self.metadata.metadata["parent_filedir"] + ) + self.plot_output1 = analysis_parent.plot_output1 + self.plot_output2 = analysis_parent.plot_output2 + self.angles_rad = analysis_parent.projections.angles_rad + self.make_wd() + self.save_overall_metadata() + self.save_data_before_analysis() + self.run() + + def make_wd(self): + """ + Creates a save directory to put projections into. 
+ """ + now = datetime.datetime.now() + dt_str = now.strftime("%Y%m%d-%H%M-") + dt_str = dt_str + self.savedir_suffix + self.wd = self.wd_parent / dt_str + if self.wd.exists(): + dt_str = now.strftime("%Y%m%d-%H%M%S-") + dt_str = dt_str + self.savedir_suffix + self.wd = self.wd_parent / dt_str + self.wd.mkdir() + + def save_overall_metadata(self): + self.metadata.filedir = pathlib.Path(self.wd) + self.metadata.filename = "overall_" + self.savedir_suffix + "_metadata.json" + self.metadata.metadata[ + "parent_metadata" + ] = self.analysis_parent.projections.metadatas[0].metadata.copy() + self.metadata.metadata["data_hierarchy_level"] = ( + self.metadata.metadata["parent_metadata"]["data_hierarchy_level"] + 1 + ) + self.metadata.save_metadata() + + def save_data_before_analysis(self): + if self.metadata.metadata["save_opts"]["Projections Before Alignment"]: + save_str = "projections_before_" + self.savedir_suffix + save_str_tif = "projections_before_" + self.savedir_suffix + ".tif" + tf.imwrite( + self.metadata.filedir / save_str_tif, + self.projections.data, + ) + + def make_metadata_list(self): + """ + Creates a metadata list for all of the methods check-marked in the UI. + This is put into the for loop in run. Each item in the list is a + separate metadata dictionary. 
+ """ + metadata_list = [] + for key in self.metadata.metadata["methods"]: + d = self.metadata.metadata["methods"] + keys_to_remove = set(self.metadata.metadata["methods"].keys()) + keys_to_remove.remove(key) + _d = { + k.replace(" ", "_"): d[k] for k in set(list(d.keys())) - keys_to_remove + } + _ = self.metadata_class() + _.metadata = self.metadata.metadata.copy() + _.metadata["methods"] = _d + newkey = key.replace(" ", "_") # put underscores in method names + if _.metadata["methods"][newkey]: + metadata_list.append(_) # append only true methods + + return metadata_list + + def init_projections(self): + self.px_range = (self.px_range_x, self.px_range_y) + if not self.downsample: + self.pyramid_level = -1 + self.ds_factor = np.power(2, int(self.pyramid_level + 1)) # will be 1 for no ds + self.px_range_x_ds = [ + int(np.around(x / self.ds_factor)) for x in self.px_range_x + ] + self.px_range_y_ds = [ + int(np.around(y / self.ds_factor)) for y in self.px_range_y + ] + self.px_range_ds = (self.px_range_x_ds, self.px_range_y_ds) + if self.pyramid_level == -1: + self.projections.get_parent_data_from_hdf(self.px_range_ds) + self.prjs = self.projections.data + else: + self.projections.get_parent_data_ds_from_hdf( + self.pyramid_level, self.px_range_ds + ) + self.prjs = self.projections.data_ds + + # Pad + self.pad_ds = tuple([int(np.around(x / self.ds_factor)) for x in self.pad]) + self.prjs = pad_projections(self.prjs, self.pad_ds) + + # center of rotation change to fit new range + if not self.use_multiple_centers: + self.center = self.center / self.ds_factor + self.center = self.center - self.px_range_x_ds[0] + self.pad_ds[0] + if self.use_multiple_centers: + # get centers for padded/downsampled data. 
just for the + # computation, not saved in metadata + centers_ds = [x[0] / self.ds_factor for x in self.analysis_parent.Center.center_slice_list] + slices_ds = [ + int(np.around(x[1] / self.ds_factor)) + for x in self.analysis_parent.Center.center_slice_list + ] + try: + linreg = linregress(slices_ds, centers_ds) + except ValueError: + self.center = self.center / self.ds_factor + self.center = self.center - self.px_range_x_ds[0] + self.pad_ds[0] + self.metadata.metadata["use_multiple_centers"] = False + self.use_multiple_centers = False + else: + m, b = linreg.slope, linreg.intercept + slices_pad = range( + self.px_range_y_ds[0] - self.pad_ds[1], + self.px_range_y_ds[1] + self.pad_ds[1], + ) + self.center = [m * x + b for x in slices_pad] + self.center = [ + c - self.px_range_x_ds[0] + self.pad_ds[0] for c in self.center + ] + + def _save_data_after(self): + if not self.skip_mk_wd_subdir: + self.make_wd_subdir() + self.metadata.filedir = self.wd_subdir + + def make_wd_subdir(self): + """ + Creates a save directory to put projections into. + """ + now = datetime.datetime.now() + dt_str = now.strftime("%Y%m%d-%H%M-") + method_str = list(self.metadata.metadata["methods"].keys())[0] + dt_str = dt_str + method_str + self.wd_subdir = self.wd / dt_str + if self.wd_subdir.exists(): + dt_str = now.strftime("%Y%m%d-%H%M%S-") + dt_str = dt_str + self.savedir_suffix + self.wd_subdir = self.wd / dt_str + self.wd_subdir.mkdir() + + @abstractmethod + def run(self): + ... 
+ + +class RunRecon(RunAnalysisBase): + """ """ + + def __init__(self, Recon): + self.recon = None + self.metadata_class = Metadata_Recon + self.metadata = self.metadata_class() + self.savedir_suffix = "recon" + super().__init__(Recon) + + def save_data_after(self): + super()._save_data_after() + if self.metadata.metadata["save_opts"]["Reconstruction"]: + tf.imwrite(self.wd_subdir / "recon.tif", self.recon) + self.analysis_parent.run_list.append({self.wd_subdir: self.metadata}) + self.metadata.save_metadata() + + def reconstruct(self): + # ensure it only runs on 1 thread for CUDA + os.environ["TOMOPY_PYTHON_THREADS"] = "1" + method_str = list(self.metadata.metadata["methods"].keys())[0] + if ( + method_str in astra_cuda_recon_algorithm_underscores + and os.environ["cuda_enabled"] == "True" + ): + self.current_recon_is_cuda = True + else: + self.current_recon_is_cuda = False + + if method_str == "MLEM_CUDA": + method_str = "EM_CUDA" + + # TODO: parsing recon method could be done in an Align method + if method_str == "SIRT_Plugin": + self.recon = tomocupy_algorithm.recon_sirt_plugin( + self.prjs, + self.angles_rad, + num_iter=self.num_iter, + center=self.center, + ) + elif method_str == "SIRT_3D": + self.recon = tomocupy_algorithm.recon_sirt_3D( + self.prjs, + self.angles_rad, + num_iter=self.num_iter, + center=self.center, + ) + elif method_str == "CGLS_3D": + self.recon = tomocupy_algorithm.recon_cgls_3D_allgpu( + self.prjs, + self.angles_rad, + num_iter=self.num_iter, + center=self.center, + ) + elif self.current_recon_is_cuda: + # Options go into kwargs which go into recon() + kwargs = {} + options = { + "proj_type": "cuda", + "method": method_str, + "num_iter": int(self.num_iter), + } + kwargs["options"] = options + self.recon = tomopy_algorithm.recon( + self.prjs, + self.angles_rad, + algorithm=wrappers.astra, + center=self.center, + ncore=1, + **kwargs, + ) + else: + os.environ["TOMOPY_PYTHON_THREADS"] = str(os.environ["num_cpu_cores"]) + if method_str == 
"gridrec" or method_str == "fbp": + self.recon = tomopy_algorithm.recon( + self.prjs, + self.angles_rad, + algorithm=method_str, + center=self.center, + ) + else: + self.recon = tomopy_algorithm.recon( + self.prjs, + self.angles_rad, + algorithm=method_str, + center=self.center, + num_iter=self.num_iter, + ) + self.recon = unpad_rec_with_pad(self.recon, self.pad_ds) + self.recon = circ_mask(self.recon, axis=0) + return self + + def save_data_before_analysis(self): + pass + + def run(self): + super().init_projections() + metadata_list = super().make_metadata_list() + for i in range(len(metadata_list)): + self.metadata = metadata_list[i] + tic = time.perf_counter() + self.reconstruct() + self.projections.data = self.recon + toc = time.perf_counter() + self.metadata.metadata["analysis_time"] = { + "seconds": toc - tic, + "minutes": (toc - tic) / 60, + "hours": (toc - tic) / 3600, + } + self.save_data_after() + + +class RunAlign(RunAnalysisBase): + """ """ + + def __init__(self, Align): + self.shift = None + self.sx = None + self.sy = None + self.conv = None + self.metadata_class = Metadata_Align + self.metadata = self.metadata_class() + self.savedir_suffix = "alignment" + super().__init__(Align) + + def init_projections(self): + super().init_projections() + if self.use_subset_correlation: + self.subset_x = [int(x / self.ds_factor) for x in self.subset_x] + self.subset_y = [int(y / self.ds_factor) for y in self.subset_y] + self.subset_x = [int(x) + self.pad_ds[0] for x in self.subset_x] + self.subset_y = [int(y) + self.pad_ds[1] for y in self.subset_y] + else: + self.subset_x = None + self.subset_y = None + + def align(self): + """ + Aligns a TomoData object using options in GUI. 
+ """ + for method in self.metadata.metadata["methods"]: + if ( + method in astra_cuda_recon_algorithm_underscores + and os.environ["cuda_enabled"] == "True" + ): + self.current_align_is_cuda = True + align_joint_cupy(self) + else: + self.current_align_is_cuda = False + os.environ["TOMOPY_PYTHON_THREADS"] = str(os.environ["num_cpu_cores"]) + import scipy.fft as fft + + fft.set_backend("scipy") + if method == "gridrec" or method == "fbp": + self.prjs, self.sx, self.sy, self.conv = align_joint_tomopy( + self.prjs, + self.angles_rad, + upsample_factor=self.upsample_factor, + center=self.center, + algorithm=method, + ) + else: + self.prjs, self.sx, self.sy, self.conv = align_joint_tomopy( + self.prjs, + self.angles_rad, + upsample_factor=self.upsample_factor, + center=self.center, + algorithm=method, + iters=self.num_iter, + ) + + def _shift_prjs_after_alignment(self): + if self.shift_full_dataset_after: + self.projections.get_parent_data_from_hdf(None) + if self.current_align_is_cuda: + self.projections._data = shift_projections( + self.projections.data, self.sx, self.sy + ) + self.projections.data = self.projections._data + else: + # TODO: make shift projections without cupy + pass + else: + self.projections._data = self.prjs + self.projections.data = self.projections._data + + # def _copy_parent_hists(self): + # if self.copy_hists: + # self.projections.get_parent_hists(0) + + def save_data_after(self): + super()._save_data_after() + self.metadata.metadata["sx"] = list(self.sx) + self.metadata.metadata["sy"] = list(self.sy) + self.metadata.metadata["convergence"] = list(self.conv) + self.saved_as_hdf = False + if self.metadata.metadata["save_opts"]["Projections After Alignment"] or self.analysis_parent.save_after_alignment: + if self.metadata.metadata["save_opts"]["hdf"]: + self.projections.filepath = ( + self.wd_subdir / "normalized_projections.hdf5" + ) + data_dict = {self.projections.hdf_key_norm_proj: self.projections.data} + 
self.projections.dask_data_to_h5(data_dict) + self.saved_as_hdf = True + elif self.metadata.metadata["save_opts"]["tiff"]: + tf.imwrite( + self.wd_subdir / "normalized_projections.tif", + self.projections.data, + ) + else: + self.projections.filepath = ( + self.wd_subdir / "normalized_projections.hdf5" + ) + data_dict = {self.projections.hdf_key_norm_proj: self.projections.data} + self.projections.dask_data_to_h5(data_dict) + if self.metadata.metadata["save_opts"]["Reconstruction"] and self.current_align_is_cuda: + if self.metadata.metadata["save_opts"]["tiff"]: + tf.imwrite(self.wd_subdir / "recon.tif", self.recon) + + self.analysis_parent.run_list.append({str(self.wd_subdir.stem): self.metadata}) + self.projections.metadata = self.metadata + self.metadata.save_metadata() + + def run(self): + """ """ + + metadata_list = self.make_metadata_list() + for i in range(len(metadata_list)): + self.metadata = metadata_list[i] + self.init_projections() + tic = perf_counter() + self.align() + # make new dataset and pad/shift it + self._shift_prjs_after_alignment() + # self._copy_parent_hists() + toc = perf_counter() + self.metadata.metadata["analysis_time"] = { + "seconds": toc - tic, + "minutes": (toc - tic) / 60, + "hours": (toc - tic) / 3600, + } + self.save_data_after() + # self.save_reconstructed_data() diff --git a/tomopyui/backend/tomoalign.py b/tomopyui/backend/tomoalign.py deleted file mode 100644 index 5fc523e..0000000 --- a/tomopyui/backend/tomoalign.py +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/env python - -from copy import copy, deepcopy -from skimage.transform import rescale # look for better option -from time import perf_counter -import os - -if os.environ["cuda_enabled"] == "True": - from ..tomocupy.prep.alignment import align_joint as align_joint_cupy - from ..tomocupy.prep.alignment import shift_prj_cp -from tomopy.prep.alignment import align_joint as align_joint_tomopy -from .util.metadata_io import save_metadata, load_metadata -from 
tomopyui.backend.util.padding import * -from tomopyui._sharedvars import * - -import datetime -import json -import os -import tifffile as tf -import tomopyui.backend.tomodata as td -import numpy as np - - -class TomoAlign: - """ """ - - def __init__(self, Align): - - # -- Creating attributes for alignment calcs -------------------------- - self._set_attributes_from_frontend(Align) - self.tomo = td.TomoData(metadata=Align.Import.metadata) - self.wd_parent = Align.Import.wd - self.plot_output1 = Align.plot_output1 - self.plot_output2 = Align.plot_output2 - self.shift = None - self.sx = None - self.sy = None - self.conv = None - self.recon = None - # TODO: probably not great place to store - self.metadata["parent_fpath"] = self.Align.Import.fpath - self.metadata["parent_fname"] = self.Align.Import.fname - self.metadata["angle_start"] = self.Align.Import.angle_start - self.metadata["angle_end"] = self.Align.Import.angle_end - - self.make_wd() - self._main() - - def _set_attributes_from_frontend(self, Align): - self.Align = Align - self.metadata = Align.metadata.copy() - if Align.partial: - self.prj_range_x = Align.prj_range_x - self.prj_range_y = Align.prj_range_y - self.pad = (Align.paddingX, Align.paddingY) - self.downsample = Align.downsample - if self.downsample: - self.downsample_factor = Align.downsample_factor - else: - self.downsample_factor = 1 - self.num_batches = Align.num_batches - self.pad_ds = tuple([int(self.downsample_factor * x) for x in self.pad]) - self.center = Align.center + self.pad_ds[0] - self.num_iter = Align.num_iter - self.upsample_factor = Align.upsample_factor - - def make_wd(self): - now = datetime.datetime.now() - os.chdir(self.wd_parent) - dt_string = now.strftime("%Y%m%d-%H%M-") - try: - os.mkdir(dt_string + "alignment") - os.chdir(dt_string + "alignment") - except: - os.mkdir(dt_string + "alignment-1") - os.chdir(dt_string + "alignment-1") - save_metadata("overall_alignment_metadata.json", self.metadata) - #!!!!!!!!!! 
make option for tiff file save - if self.metadata["save_opts"]["tomo_before"]: - np.save("projections_before_alignment", self.tomo.prj_imgs) - self.wd = os.getcwd() - - def make_metadata_list(self): - """ - Creates a metadata list for all of the methods check-marked in the UI. - This is put into the for loop in _main. Each item in the list is a - separate metadata dictionary. - """ - metadata_list = [] - for key in self.metadata["methods"]: - d = self.metadata["methods"] - keys_to_remove = set(self.metadata["methods"].keys()) - keys_to_remove.remove(key) - _d = { - k.replace(" ", "_"): d[k] for k in set(list(d.keys())) - keys_to_remove - } - _metadata = self.metadata.copy() - _metadata["methods"] = _d - newkey = key.replace(" ", "_") # put underscores in method names - if _metadata["methods"][newkey]: - metadata_list.append(_metadata) # append only true methods - - return metadata_list - - def init_prj(self): - if self.metadata["partial"]: - prj_range_x_low = self.prj_range_x[0] - prj_range_x_high = self.prj_range_x[1] - prj_range_y_low = self.prj_range_y[0] - prj_range_y_high = self.prj_range_y[1] - self.prjs = deepcopy( - self.tomo.prj_imgs[ - :, - prj_range_y_low:prj_range_y_high:1, - prj_range_x_low:prj_range_x_high:1, - ] - ) - # center of rotation change to fit new range - self.center = self.center - prj_range_x_low - else: - self.prjs = deepcopy(self.tomo.prj_imgs) - - # Downsample - if self.downsample: - self.prjs = rescale( - self.prjs, - (1, self.downsample_factor, self.downsample_factor), - anti_aliasing=True, - ) - # center of rotation change for downsampled data - self.center = self.center * self.downsample_factor - - # Pad - self.prjs, self.pad_ds = pad_projections(self.prjs, self.pad_ds) - - def align(self): - """ - Aligns a TomoData object using options in GUI. 
- """ - for method in self.metadata["methods"]: - if ( - method in astra_cuda_recon_algorithm_underscores - and os.environ["cuda_enabled"] == "True" - ): - self.current_align_is_cuda = True - align_joint_cupy(self) - else: - self.current_align_is_cuda = False - os.environ["TOMOPY_PYTHON_THREADS"] = str(os.environ["num_cpu_cores"]) - import scipy.fft as fft - - fft.set_backend("scipy") - if method == "gridrec" or method == "fbp": - self.prjs, self.sx, self.sy, self.conv = align_joint_tomopy( - self.prjs, - self.tomo.theta, - upsample_factor=self.upsample_factor, - center=self.center, - algorithm=method, - ) - else: - self.prjs, self.sx, self.sy, self.conv = align_joint_tomopy( - self.prjs, - self.tomo.theta, - upsample_factor=self.upsample_factor, - center=self.center, - algorithm=method, - iters=self.num_iter, - ) - - def save_align_data(self): - - # if on the second alignment, go into the directory most recently saved - # !!!!!!!!!!!! need change directory - now = datetime.datetime.now() - dt_string = now.strftime("%Y%m%d-%H%M-") - method_str = list(self.metadata["methods"].keys())[0] - os.chdir(self.wd) - savedir = dt_string + method_str - os.mkdir(savedir) - os.chdir(savedir) - self.metadata["savedir"] = os.getcwd() - save_metadata("metadata.json", self.metadata) - if self.metadata["save_opts"]["tomo_after"]: - if self.metadata["save_opts"]["npy"]: - np.save("projections_after_alignment", self.tomo_aligned.prj_imgs) - if self.metadata["save_opts"]["tiff"]: - tf.imwrite( - "projections_after_alignment.tif", self.tomo_aligned.prj_imgs - ) - - # defaults to at least saving tiff if none are checked - if ( - not self.metadata["save_opts"]["tiff"] - and not self.metadata["save_opts"]["npy"] - ): - tf.imwrite( - "projections_after_alignment.tif", self.tomo_aligned.prj_imgs - ) - if self.metadata["save_opts"]["recon"] and self.current_align_is_cuda: - if self.metadata["save_opts"]["npy"]: - np.save("last_recon", self.recon) - if self.metadata["save_opts"]["tiff"]: - 
tf.imwrite("last_recon.tif", self.recon) - if ( - not self.metadata["save_opts"]["tiff"] - and not self.metadata["save_opts"]["npy"] - ): - tf.imwrite("last_recon.tif", self.recon) - self.Align.run_list.append({savedir: self.metadata}) - np.save("sx", self.sx) - np.save("sy", self.sy) - np.save("conv", self.conv) - - def _shift_prjs_after_alignment( - self, - ): - new_prj_imgs = deepcopy(self.tomo.prj_imgs) - new_prj_imgs, self.pad = pad_projections(new_prj_imgs, self.pad) - new_prj_imgs = shift_prj_cp( - new_prj_imgs, - self.sx, - self.sy, - self.num_batches, - self.pad, - use_corr_prj_gpu=False, - ) - new_prj_imgs = trim_padding(new_prj_imgs) - self.tomo_aligned = td.TomoData( - prj_imgs=new_prj_imgs, metadata=self.Align.Import.metadata - ) - - def _main(self): - """ - Reconstructs a TomoData object using options in GUI. - """ - - metadata_list = self.make_metadata_list() - for i in range(len(metadata_list)): - self.metadata = metadata_list[i] - self.init_prj() - tic = perf_counter() - self.align() - # make new dataset and pad/shift it - if self.current_align_is_cuda: - self._shift_prjs_after_alignment() - else: - self.tomo_aligned = td.TomoData( - prj_imgs=self.prjs, metadata=self.Align.Import.metadata - ) - - toc = perf_counter() - self.metadata["analysis_time"] = { - "seconds": toc - tic, - "minutes": (toc - tic) / 60, - "hours": (toc - tic) / 3600, - } - self.save_align_data() - # self.save_reconstructed_data() diff --git a/tomopyui/backend/tomodata.py b/tomopyui/backend/tomodata.py deleted file mode 100644 index 610bbc1..0000000 --- a/tomopyui/backend/tomodata.py +++ /dev/null @@ -1,328 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -""" -Module with abstraction from core tomopy functions. Includes classes TomoData -and Recon, which store useful information about the projections and -reconstructions. Can use these classes for plotting. 
Written for use in -Jupyter notebook found in doc/demo (TODO: add Jupyter notebook here) - -""" - -from __future__ import print_function - -import logging -import numexpr as ne -import dxchange -import numpy as np -import matplotlib -import matplotlib.pyplot as plt -import smtplib -import time -import os -import tifffile as tf -import glob - -from tomopy.sim.project import angles as angle_maker -from matplotlib import animation, rc, colors -from matplotlib.widgets import Slider - - -# ----------------------------- Class TomoData -------------------------# - - -class TomoData: - def __init__( - self, - prj_imgs=None, - numX=None, - numY=None, - num_theta=None, - theta=None, - verbose_import=False, - metadata=None, - fname=None, - fpath=None, - angle_start=None, - angle_end=None - # correctionOptions=dict(), - ): - self.metadata = metadata - self.prj_imgs = prj_imgs - self.numX = numX - self.numY = numY - self.num_theta = num_theta - self.theta = theta - self.verbose_import = verbose_import - self.fpath = fpath - self.fname = fname - - if self.verbose_import is True: - logging.getLogger("dxchange").setLevel(logging.INFO) - else: - logging.getLogger("dxchange").setLevel(logging.WARNING) - - if self.metadata is not None and self.prj_imgs is None: - self.fpath = self.metadata["fpath"] - self.fname = self.metadata["fname"] - self.metadata["imgtype"] = "" - self.filetype_parser() - if self.metadata["imgtype"] == "tiff": - self = self.import_tiff() - if self.metadata["imgtype"] == "tiff folder": - self = self.import_tiff_folder() - if self.metadata["imgtype"] == "npy": - self = self.import_npy() - - if self.prj_imgs is not None: - self.num_theta, self.numY, self.numX = self.prj_imgs.shape - # can probably fix this later to rely on user input. Right now user - # input is only for storing metadata, maybe better that way. 
- if self.theta is None and self.num_theta is not None: - self.theta = angle_maker( - self.num_theta, - ang1=self.metadata["angle_start"], - ang2=self.metadata["angle_end"], - ) - - if self.prj_imgs is None: - logging.warning("This did not import.") - - # --------------------------Import Functions--------------------------# - - def filetype_parser(self): - if self.fname == "": - self.metadata["imgtype"] = "tiff folder" - if self.fname.__contains__(".tif"): - # if there is a file name, checks to see if there are many more - # tiffs in the folder. If there are, will upload all of them. - tiff_count_in_folder = len(glob.glob1(self.fpath, "*.tif")) - if tiff_count_in_folder > 50: - self.metadata["imgtype"] = "tiff folder" - else: - self.metadata["imgtype"] = "tiff" - if self.fname.__contains__(".npy"): - self.metadata["imgtype"] = "npy" - - return self - - def import_tiff(self): - """ - Import tiff and create TomoData object based on option_dict. - - Returns - ------- - self : TomoData - """ - # navigates to path selected. User may pick a file instead of a folder. - os.chdir(self.metadata["fpath"]) - self.prj_imgs = dxchange.reader.read_tiff(self.metadata["fname"]).astype( - np.float32 - ) - if self.prj_imgs.ndim == 2: - self.prj_imgs = self.prj_imgs[np.newaxis, :, :] - # this will rotate it 90 degrees. Can update to rotate it multiple - # times. - if "rotate" in self.metadata: - if self.metadata["rotate"]: - self.prj_imgs = np.swapaxes(self.prj_imgs, 1, 2) - self.prj_imgs = np.flip(self.prj_imgs, 2) - return self - - def import_tiff_folder(self, num_theta=None): - """ - Import tiffs in a folder. - - Parameters - ---------- - num_theta: int, required - Total number of projection images taken. - Returns - ------- - self : TomoData - """ - # navigates to path selected. User may pick a file instead of a folder. - # This should not matter, it ignores that. - - os.chdir(self.metadata["fpath"]) - # Using tiffsequence instead of dxchange. 
dxchange.read_tiff_stack - # does not do a good job finding files if they do not have a number - # at the end. - - image_sequence = tf.TiffSequence() - self.num_theta = len(image_sequence.files) - self.prj_imgs = image_sequence.asarray().astype(np.float32) - image_sequence.close() - # rotate dataset 90 deg if wanted - if "rotate" in self.metadata: - if self.metadata["rotate"]: - self.prj_imgs = np.swapaxes(self.prj_imgs, 1, 2) - self.prj_imgs = np.flip(self.prj_imgs, 2) - - return self - - def import_npy(self): - """ - Import tiff and create TomoData object based on option_dict. - - Returns - ------- - self : TomoData - """ - # navigates to path selected. User may pick a file instead of a folder. - os.chdir(self.metadata["fpath"]) - self.prj_imgs = np.load(self.metadata["fname"]).astype(np.float32) - if self.prj_imgs.ndim == 2: - self.prj_imgs = self.prj_imgs[np.newaxis, :, :] - # this will rotate it 90 degrees. Can update to rotate it multiple - # times. - if "rotate" in self.metadata: - if self.metadata["opts"]: - self.prj_imgs = np.swapaxes(self.prj_imgs, 1, 2) - self.prj_imgs = np.flip(self.prj_imgs, 2) - return self - - def writeTiff(self, fname): - """ - Writes prj_imgs attribute to file. - - Parameters - ---------- - fname : str, relative or absolute filepath. - - """ - dxchange.write_tiff(self.prj_imgs, fname=fname) - - # --------------------------Correction Functions--------------------------# - - def removeStripes(self, options): - """ - Remove stripes from sinograms so that you end up with less ring - artifacts in reconstruction. - - eg - - .. highlight:: python - .. 
code-block:: python - - tomoCorr = tomo.removeStripes( - options={ - "remove_all_stripe": { - "snr": 3, - "la_size": 61, - "sm_size": 21, - "dim": 1, - "ncore": None, - "nchunk": None, - }, - "remove_large_stripe": { - "snr": 3, - "size": 51, - "drop_ratio": 0.1, - "norm": True, - "ncore": None, - "nchunk": None, - }, - } - ) - - Parameters - ---------- - options : nested dict - - The formatting here is important - the keys in the 0th level of - the dictionary (i.e. 'remove_all_stripe') will call - the tomopy.prep.stripe function with the same name. Its - corresponding value are the options input into that function. - The order of operations will proceed with the first dictionary - key given, then the second, and so on... - - Returns - ------- - self : tomoData - """ - for key in options: - if key == "remove_all_stripe": - print("Performing ALL stripe removal.") - self.prj_imgs = tomopy.prep.stripe.remove_all_stripe( - self.prj_imgs, **options[key] - ) - if key == "remove_large_stripe": - print("Performing LARGE stripe removal.") - self.prj_imgs = tomopy.prep.stripe.remove_large_stripe( - self.prj_imgs, **options[key] - ) - self.correctionOptions = options - return self - - -############################# TomoDataCombined ############################# - - -def normalize(tomo, flat, dark, rmZerosAndNans=True): - """ - Normalizes the data with typical options for normalization. TODO: Needs - more options. - TODO: add option to delete the previous objects from memory. - - Parameters - ---------- - rmZerosAndNans : bool - Remove the zeros and nans from normalized data. 
- - Returns - ------- - tomoNorm : TomoData - Normalized data in TomoData object - tomoNormMLog : TomoData - Normalized + -log() data in TomoData object - """ - tomoNormprj_imgs = tomopy.normalize(tomo.prj_imgs, flat.prj_imgs, dark.prj_imgs) - tomoNorm = TomoData(prj_imgs=tomoNormprj_imgs, raw="No") - tomoNormMLogprj_imgs = tomopy.minus_log(tomoNormprj_imgs) - tomoNormMLog = TomoData(prj_imgs=tomoNormMLogprj_imgs, raw="No") - if rmZerosAndNans == True: - tomoNormMLog.prj_imgs = tomopy.misc.corr.remove_nan( - tomoNormMLog.prj_imgs, val=0.0 - ) - tomoNormMLog.prj_imgs[tomoNormMLog.prj_imgs == np.inf] = 0 - return tomoNorm, tomoNormMLog - - -################################### Misc. Functions ########################### - - -def textme(phoneNumber, carrierEmail, gmail_user, gmail_password): - """ - From https://stackabuse.com/how-to-send-emails-with-gmail-using-python/. - - Sends a text message to you when called. - - Parameters - ---------- - phoneNumber : str - carrierEmail : this is your carrier email. TODO, find list of these, and - allow input of just the carrier. Idea from TXMWizard software. - gmail_user : str, gmail username - gmail_password : str, gmail password - """ - - toaddr = str(phoneNumber + "@" + carrierEmail) - fromaddr = gmail_user - message_subject = "Job done." - message_text = "Finished the job." 
- message = ( - "From: %s\r\n" % fromaddr - + "To: %s\r\n" % toaddr - + "Subject: %s\r\n" % message_subject - + "\r\n" - + message_text - ) - try: - server = smtplib.SMTP_SSL("smtp.gmail.com", 465) - server.ehlo() - server.login(gmail_user, gmail_password) - server.sendmail(fromaddr, toaddr, message) - server.close() - except: - print("Something went wrong...") diff --git a/tomopyui/backend/tomorecon.py b/tomopyui/backend/tomorecon.py deleted file mode 100644 index a7cb1bf..0000000 --- a/tomopyui/backend/tomorecon.py +++ /dev/null @@ -1,222 +0,0 @@ -from joblib import Parallel, delayed -from time import process_time, perf_counter, sleep -from tomopy.recon import wrappers -from tomopy.prep.alignment import scale as scale_tomo -from contextlib import nullcontext -from tomopy.recon import algorithm -from tomopy.misc.corr import circ_mask -from .util.metadata_io import save_metadata, load_metadata -from tomopy.recon import algorithm as tomopy_algorithm -from tomopyui.backend.tomoalign import TomoAlign -from tomopyui.backend.util.padding import * - -import tomopyui.backend.tomodata as td -import matplotlib.pyplot as plt -import datetime -import time -import json -import os -import tifffile as tf -import tomopy -import numpy as np - -if os.environ["cuda_enabled"] == "True": - import astra - import tomopyui.tomocupy.recon.algorithm as tomocupy_algorithm - import cupy as cp - - -class TomoRecon(TomoAlign): - """ """ - - def __init__(self, Recon, Align=None): - # -- Creating attributes for reconstruction calcs --------------------- - self._set_attributes_from_frontend(Recon) - self.metadata["parent_fpath"] = self.Recon.Import.fpath - self.metadata["parent_fname"] = self.Recon.Import.fname - self.metadata["angle_start"] = self.Recon.Import.angle_start - self.metadata["angle_end"] = self.Recon.Import.angle_end - self.tomo = td.TomoData(metadata=Recon.Import.metadata) - self.recon = None - self.wd_parent = Recon.Import.wd - self.make_wd() - self._main() - - def 
_set_attributes_from_frontend(self, Recon): - self.Recon = Recon - self.metadata = Recon.metadata.copy() - self.partial = Recon.partial - if self.partial: - self.prj_range_x = Recon.prj_range_x - self.prj_range_y = Recon.prj_range_y - self.pad = (Recon.paddingX, Recon.paddingY) - self.downsample = Recon.downsample - if self.downsample: - self.downsample_factor = Recon.downsample_factor - else: - self.downsample_factor = 1 - self.pad_ds = tuple([int(self.downsample_factor * x) for x in self.pad]) - self.center = Recon.center + self.pad_ds[0] - self.num_iter = Recon.num_iter - self.upsample_factor = Recon.upsample_factor - - def make_wd(self): - now = datetime.datetime.now() - os.chdir(self.wd_parent) - dt_string = now.strftime("%Y%m%d-%H%M-") - try: - os.mkdir(dt_string + "recon") - os.chdir(dt_string + "recon") - except: - os.mkdir(dt_string + "recon-1") - os.chdir(dt_string + "recon-1") - save_metadata("overall_recon_metadata.json", self.metadata) - if self.metadata["save_opts"]["tomo_before"]: - np.save("projections_before_alignment", self.tomo.prj_imgs) - self.wd = os.getcwd() - - def save_reconstructed_data(self): - now = datetime.datetime.now() - dt_string = now.strftime("%Y%m%d-%H%M-") - method_str = list(self.metadata["methods"].keys())[0] - os.chdir(self.wd) - savedir = dt_string + method_str - os.mkdir(savedir) - os.chdir(savedir) - self.metadata["savedir"] = os.getcwd() - save_metadata("metadata.json", self.metadata) - - if self.metadata["save_opts"]["tomo_before"]: - if self.metadata["save_opts"]["npy"]: - np.save("tomo", self.tomo.prj_imgs) - if self.metadata["save_opts"]["tiff"]: - tf.imwrite("tomo.tif", self.tomo.prj_imgs) - if ( - not self.metadata["save_opts"]["tiff"] - and not self.metadata["save_opts"]["npy"] - ): - tf.imwrite("tomo.tif", self.tomo.prj_imgs) - if self.metadata["save_opts"]["recon"]: - if self.metadata["save_opts"]["npy"]: - np.save("recon", self.recon) - if self.metadata["save_opts"]["tiff"]: - tf.imwrite("recon.tif", self.recon) 
- if ( - not self.metadata["save_opts"]["tiff"] - and not self.metadata["save_opts"]["npy"] - ): - tf.imwrite("recon.tif", self.recon) - self.Recon.run_list.append({savedir: self.metadata}) - - def reconstruct(self): - - # ensure it only runs on 1 thread for CUDA - os.environ["TOMOPY_PYTHON_THREADS"] = "1" - method_str = list(self.metadata["methods"].keys())[0] - if ( - method_str in self.Recon.astra_cuda_methods_list - and os.environ["cuda_enabled"] == "True" - ): - self.current_recon_is_cuda = True - else: - self.current_recon_is_cuda = False - - if method_str == "MLEM_CUDA": - method_str = "EM_CUDA" - - # Initialization of reconstruction dataset - tomo_shape = self.prjs.shape - self.recon = np.empty( - (tomo_shape[1], tomo_shape[2], tomo_shape[2]), dtype=np.float32 - ) - self.Recon.log.info("Starting" + method_str) - - # TODO: parsing recon method could be done in an Align method - if method_str == "SIRT_Plugin": - self.recon = tomocupy_algorithm.recon_sirt_plugin( - self.prjs, - self.tomo.theta, - num_iter=self.num_iter, - rec=self.recon, - center=self.center, - ) - elif method_str == "SIRT_3D": - self.recon = tomocupy_algorithm.recon_sirt_3D( - self.prjs, - self.tomo.theta, - num_iter=self.num_iter, - rec=self.recon, - center=self.center, - ) - elif method_str == "CGLS_3D": - self.recon = tomocupy_algorithm.recon_cgls_3D_allgpu( - self.prjs, - self.tomo.theta, - num_iter=self.num_iter, - rec=self.recon, - center=self.center, - ) - elif self.current_recon_is_cuda: - # Options go into kwargs which go into recon() - kwargs = {} - options = { - "proj_type": "cuda", - "method": method_str, - "num_iter": self.num_iter, - # TODO: "extra_options": {}, - } - kwargs["options"] = options - self.recon = tomopy_algorithm.recon( - self.prjs, - self.tomo.theta, - algorithm=wrappers.astra, - init_recon=self.recon, - center=self.center, - ncore=1, - **kwargs, - ) - else: - # defined in _main.py - os.environ["TOMOPY_PYTHON_THREADS"] = str(os.environ["num_cpu_cores"]) - if 
algorithm == "gridrec" or algorithm == "fbp": - - self.recon = tomopy_algorithm.recon( - self.prjs, - self.tomo.theta, - algorithm=method_str, - init_recon=self.recon, - center=self.center, - ) - else: - self.recon = tomopy_algorithm.recon( - self.prjs, - self.tomo.theta, - algorithm=method_str, - init_recon=self.recon, - center=self.center, - num_iter=self.num_iter, - ) - - return self - - def _main(self): - """ - Reconstructs a TomoData object using options in GUI. - """ - - metadata_list = super().make_metadata_list() - for i in range(len(metadata_list)): - self.metadata = metadata_list[i] - super().init_prj() - tic = time.perf_counter() - self.reconstruct() - self.recon = unpad_rec_with_pad(self.recon, self.pad_ds) - self.recon = circ_mask(self.recon, axis=0) - toc = time.perf_counter() - - self.metadata["analysis_time"] = { - "seconds": toc - tic, - "minutes": (toc - tic) / 60, - "hours": (toc - tic) / 3600, - } - self.save_reconstructed_data() diff --git a/tomopyui/backend/util/center.py b/tomopyui/backend/util/center.py index 1a8e723..3d262b2 100644 --- a/tomopyui/backend/util/center.py +++ b/tomopyui/backend/util/center.py @@ -7,12 +7,14 @@ import numpy as np from tomopy.misc.corr import circ_mask -from tomopy.recon.algorithm import recon +from tomopy.recon.algorithm import recon as recon_tomo import tomopy.util.dtype as dtype + # includes astra_cuda_recon_algorithm_kwargs, tomopy_recon_algorithm_kwargs, # and tomopy_filter_names, extend_description_style from tomopyui._sharedvars import * + def write_center( tomo, theta, @@ -25,6 +27,8 @@ def write_center( sinogram_order=False, filter_name="parzen", ): + if theta is None: + return None, cen_range tomo = dtype.as_float32(tomo) theta = dtype.as_float32(theta) @@ -44,28 +48,28 @@ def write_center( stack[m] = tomo[ind] else: stack[m] = tomo[:, ind, :] - + os.environ["TOMOPY_PYTHON_THREADS"] = str(os.environ["num_cpu_cores"]) # Reconstruct the same slice with a range of centers. 
if algorithm == "gridrec" or algorithm == "fbp": - rec = recon( + rec = recon_tomo( stack, theta, center=center, sinogram_order=True, algorithm=algorithm, filter_name=filter_name, - nchunk=1, + # nchunk=1, ) else: - rec = recon( + rec = recon_tomo( stack, theta, center=center, sinogram_order=True, algorithm=algorithm, num_iter=num_iter, - ncore=None, - nchunk=1, + # ncore=None, + # nchunk=1, ) # Apply circular mask. diff --git a/tomopyui/backend/util/dask_downsample.py b/tomopyui/backend/util/dask_downsample.py new file mode 100644 index 0000000..55ac836 --- /dev/null +++ b/tomopyui/backend/util/dask_downsample.py @@ -0,0 +1,372 @@ +## This is a wrapper for skimage.transform.pyramid_reduce for large image series that +## uses dask. Pretty disorganized, but it works for my use case. Could be expanded +## eventually. + +# TODO: currently, spline filter does not work properly - it is also splining the z axis +# USE multiple 1d splines + +import dask +import dask.array as da +import dask_image +import math +import numpy as np +import scipy.ndimage as ndi +import dask_image.ndfilters +import dask_image.ndinterp +import h5py +import os + +from numpy.lib import NumpyVersion +from scipy import __version__ as scipy_version +from collections.abc import Iterable + + +def pyramid_reduce_gaussian( + image, + downscale=2, + sigma=None, + order=3, + mode="reflect", + cval=0.0, + preserve_range=False, + channel_axis=0, + pyramid_levels=3, + h5_filepath=None, + compute=False, + io_obj=None, +): + + """ + Wrapper for skimage.transform.pyramid_reduce_gaussian. + + Parameters + ---------- + image: dask.array + Time series images that you want to reduce into pyramid form. + downscale: int + Factor by which you would like to downscale the image (along "X" and "Y" pixels) + sigma + Gaussian standard deviation. Will apply equally on x and y, but not on the channel + axis (which defaults to 0). + order: int + Found in pyramid_reduce order description. 
+ mode: str + Check pyramid_reduce for description. + cval + Defines constant value added to borders. + pyramid_levels: int + Number of levels to downscale by 2. + """ + from tomopyui.backend.io import IOBase + + coarseneds = [] + hists = [] + return_da = True + downsample_factor = 1 + if h5_filepath is not None: + compute = False + return_da = False + open_file = h5py.File(h5_filepath, "r+") + if IOBase.hdf_key_ds in open_file: + del open_file[IOBase.hdf_key_ds] + if io_obj is not None: + compute = False + return_da = False + io_obj._open_hdf_file_append() + io_obj._delete_downsampled_data() + image = io_obj.hdf_file[io_obj.hdf_key_norm_proj] + open_file = io_obj.hdf_file + h5_filepath = io_obj.filepath + + if compute: + return_da = False + if not isinstance(image, da.Array): + image = da.from_array(image, chunks = "auto") + else: + image = image.rechunk(chunks="auto") + for i in range(pyramid_levels): + pad_on_levels = _check_divisible(image, 2) + if pad_on_levels is not None: + image = da.pad(image, pad_on_levels) + filtered = pyramid_reduce( + image, + downscale=downscale, + sigma=sigma, + order=order, + mode=mode, + cval=cval, + preserve_range=preserve_range, + channel_axis=channel_axis, + ) + + if filtered is None: + break + coarsened = da.coarsen(np.mean, filtered, {0: 1, 1: 2, 2: 2}).astype(np.float32) + r = [da.min(coarsened), da.max(coarsened)] + bins = 200 if coarsened.size > 200 else coarsened.size + hist = da.histogram(coarsened, range=r, bins=bins) + percentile = da.percentile(coarsened.flatten(), q=(0.5, 99.5)) + downsample_factor = da.from_array(np.power(2, i + 1)) + + if h5_filepath is not None: + + subgrp = IOBase.hdf_key_ds + str(i) + "/" + r = da.from_array(r) + downsample_factor = downsample_factor + savedict = { + subgrp + IOBase.hdf_key_data: coarsened, + subgrp + IOBase.hdf_key_bin_frequency: hist[0], + subgrp + IOBase.hdf_key_bin_edges: hist[1], + subgrp + IOBase.hdf_key_image_range: r, + subgrp + IOBase.hdf_key_percentile: percentile, + 
subgrp + IOBase.hdf_key_ds_factor: downsample_factor, + } + da.to_hdf5(h5_filepath, savedict) + bin_edges = da.from_array(open_file[subgrp + IOBase.hdf_key_bin_edges]) + bin_centers = da.from_array( + [ + (bin_edges[i] + bin_edges[i + 1]) / 2 + for i in range(len(bin_edges) - 1) + ] + ) + da.to_hdf5(h5_filepath, subgrp + IOBase.hdf_key_bin_centers, bin_centers) + image = da.from_array(open_file[subgrp + IOBase.hdf_key_data]) + else: + coarseneds.append(coarsened) + hists.append(hist) + open_file.close() + + if compute: + computed_coarseneds = [coarsened.compute() for coarsened in coarsened] + computed_hists = [hist.compute() for hist in hists] + return computed_coarseneds, computed_hists + elif return_da: + return coarseneds, hists + + +def pyramid_reduce( + image, + downscale=2, + sigma=None, + order=3, + mode="reflect", + cval=0.0, + preserve_range=False, + channel_axis=0, +): + + """ + Wrapper for skimage.transform.pyramid_reduce. + + Parameters + ---------- + image: dask.array + Time series images that you want to reduce into pyramid form. + downscale: int + Factor by which you would like to downscale the image (along "X" and "Y" pixels) + sigma + Gaussian standard deviation. Will apply equally on x and y, but not on the channel + axis (which defaults to 0). + order: int + Found in pyramid_reduce order description. + mode: str + Padding mode? Check pyramid_reduce for description. + cval + Defines constant value added to + """ + _check_factor(downscale) + if channel_axis is not None: + channel_axis = channel_axis % image.ndim + out_shape = tuple( + math.ceil(d / float(downscale)) if ax != channel_axis else d + for ax, d in enumerate(image.shape) + ) + else: + out_shape = tuple(math.ceil(d / float(downscale)) for d in image.shape) + + if sigma is None: + # automatically determine sigma which covers > 99% of distribution + sigma = 2 * downscale / 6.0 + + smoothed = _smooth(image, sigma, mode, cval, channel_axis) + # TODO: change names. 
Resize only spline interpolates the data right now. + try: + filtered = resize( + smoothed, out_shape, order=order, mode=mode, cval=cval, anti_aliasing=False + ) + except ValueError as e: + return + else: + return filtered + + +def _check_divisible(arr, factor): + shape = arr.shape + shape_mod = [dim % 2 for dim in shape] + if any(x != 0 for x in shape_mod): + pad = [0 if mod == 0 else 1 for mod in shape_mod] + pad[0] = 0 + pad = [(0, p) for p in pad] + return pad + else: + return None + + +def _check_factor(factor): + if factor <= 1: + raise ValueError("scale factor must be greater than 1") + + +def _smooth(image, sigma, mode, cval, channel_axis): + """Return image with each channel smoothed by the Gaussian filter.""" + # apply Gaussian filter to all channels independently + if channel_axis is not None: + # can rely on gaussian to insert a 0 entry at channel_axis + channel_axis = channel_axis % image.ndim + sigma = (sigma,) * (image.ndim - 1) + else: + channel_axis = None + smoothed = gaussian(image, sigma, mode=mode, cval=cval, channel_axis=channel_axis) + return smoothed + + +def gaussian(image, sigma=1, mode="nearest", cval=0.0, truncate=4.0, channel_axis=0): + if channel_axis is not None: + # do not filter across channels + if len(sigma) == image.ndim - 1: + sigma = list(sigma) + sigma.insert(channel_axis % image.ndim, 0) + return dask_image.ndfilters.gaussian_filter( + image, sigma, mode=mode, cval=cval, truncate=truncate + ) + + +def resize( + image, + output_shape, + order=3, + mode="reflect", + cval=0.0, + clip=True, + preserve_range=False, + anti_aliasing=None, + anti_aliasing_sigma=None, +): + image, output_shape = _preprocess_resize_output_shape(image, output_shape) + input_shape = image.shape + input_type = image.dtype + if input_type == np.float16: + image = image.astype(np.float32) + + if anti_aliasing is None: + anti_aliasing = not input_type == bool and any( + x < y for x, y in zip(output_shape, input_shape) + ) + + if input_type == bool and 
anti_aliasing: + raise ValueError("anti_aliasing must be False for boolean images") + factors = np.divide(input_shape, output_shape) + # Save input value range for clip + # img_bounds = [da.min(image), da.max(image)] if clip else None + # Translate modes used by np.pad to those used by scipy.ndimage + ndi_mode = _to_ndimage_mode(mode) + if NumpyVersion(scipy_version) >= "1.6.0": + # The grid_mode kwarg was introduced in SciPy 1.6.0 + zoom_factors = [1 / f for f in factors] + out = zoom(image, zoom_factors, mode=ndi_mode, cval=cval, grid_mode=True) + return out + + +def _preprocess_resize_output_shape(image, output_shape): + output_shape = tuple(output_shape) + output_ndim = len(output_shape) + input_shape = image.shape + if output_ndim > image.ndim: + # append dimensions to input_shape + input_shape += (1,) * (output_ndim - image.ndim) + elif output_ndim == image.ndim - 1: + output_shape = output_shape + (image.shape[-1],) + elif output_ndim < image.ndim: + raise ValueError( + "output_shape length cannot be smaller than the " + "image number of dimensions" + ) + + return image, output_shape + + +def _to_ndimage_mode(mode): + """Convert from `numpy.pad` mode name to the corresponding ndimage mode.""" + mode_translation_dict = dict( + constant="constant", + edge="nearest", + symmetric="reflect", + reflect="mirror", + wrap="wrap", + ) + if mode not in mode_translation_dict: + raise ValueError( + ( + f"Unknown mode: '{mode}', or cannot translate mode. The " + f"mode should be one of 'constant', 'edge', 'symmetric', " + f"'reflect', or 'wrap'. See the documentation of numpy.pad for " + f"more info." + ) + ) + return _fix_ndimage_mode(mode_translation_dict[mode]) + + +def _fix_ndimage_mode(mode): + # SciPy 1.6.0 introduced grid variants of constant and wrap which + # have less surprising behavior for images. 
Use these when available + grid_modes = {"constant": "grid-constant", "wrap": "grid-wrap"} + if NumpyVersion(scipy_version) >= "1.6.0": + mode = grid_modes.get(mode, mode) + return mode + + +def zoom( + input, zoom, order=3, mode="constant", cval=0.0, prefilter=True, grid_mode=False +): + if order < 0 or order > 5: + raise RuntimeError("spline order not supported") + if input.ndim < 1: + raise RuntimeError("input and output rank must be > 0") + zoom = _normalize_sequence(zoom, input.ndim) + output_shape = tuple([int(round(ii * jj)) for ii, jj in zip(input.shape, zoom)]) + if prefilter and order > 1: + padded, npad = _prepad_for_spline_filter(input, mode, cval) + filtered = dask_image.ndinterp.spline_filter1d(padded, order, axis=1, mode=mode) + filtered = dask_image.ndinterp.spline_filter1d(padded, order, axis=2, mode=mode) + return filtered + + +def _normalize_sequence(input, rank): + """If input is a scalar, create a sequence of length equal to the + rank by duplicating the input. If input is a sequence, + check if its length is equal to the length of array. 
+ """ + is_str = isinstance(input, str) + if not is_str and isinstance(input, Iterable): + normalized = list(input) + if len(normalized) != rank: + err = "sequence argument must have length equal to input rank" + raise RuntimeError(err) + else: + normalized = [input] * rank + return normalized + + +def _prepad_for_spline_filter(input, mode, cval): + if mode in ["nearest", "grid-constant"]: + npad = 12 + if mode == "grid-constant": + padded = da.pad(input, npad, mode="constant", constant_values=cval) + elif mode == "nearest": + padded = da.pad(input, npad, mode="edge") + else: + # other modes have exact boundary conditions implemented so + # no prepadding is needed + npad = 0 + padded = input + return padded, npad diff --git a/tomopyui/widgets/_import/__init__.py b/tomopyui/backend/util/dxchange/__init__.py similarity index 100% rename from tomopyui/widgets/_import/__init__.py rename to tomopyui/backend/util/dxchange/__init__.py diff --git a/tomopyui/backend/util/dxchange/reader.py b/tomopyui/backend/util/dxchange/reader.py new file mode 100644 index 0000000..41ef408 --- /dev/null +++ b/tomopyui/backend/util/dxchange/reader.py @@ -0,0 +1,1276 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# ######################################################################### +# Copyright (c) 2015, UChicago Argonne, LLC. All rights reserved. # +# # +# Copyright 2015. UChicago Argonne, LLC. This software was produced # +# under U.S. Government contract DE-AC02-06CH11357 for Argonne National # +# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the # +# U.S. Department of Energy. The U.S. Government has rights to use, # +# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR # +# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR # +# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. 
If software is # +# modified to produce derivative works, such modified software should # +# be clearly marked, so as not to confuse it with the version available # +# from ANL. # +# # +# Additionally, redistribution and use in source and binary forms, with # +# or without modification, are permitted provided that the following # +# conditions are met: # +# # +# * Redistributions of source code must retain the above copyright # +# notice, this list of conditions and the following disclaimer. # +# # +# * Redistributions in binary form must reproduce the above copyright # +# notice, this list of conditions and the following disclaimer in # +# the documentation and/or other materials provided with the # +# distribution. # +# # +# * Neither the name of UChicago Argonne, LLC, Argonne National # +# Laboratory, ANL, the U.S. Government, nor the names of its # +# contributors may be used to endorse or promote products derived # +# from this software without specific prior written permission. # +# # +# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS # +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago # +# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # +# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # +# POSSIBILITY OF SUCH DAMAGE. # +# ######################################################################### + +""" +Module for importing data files. 
+""" + +from __future__ import absolute_import, division, print_function, unicode_literals + +import numpy as np +import six +import os +import h5py +import logging +import re +import math +import struct +from contextlib import contextmanager +import dxchange.writer as writer +from dxchange.dtype import empty_shared_array +import warnings +import functools +import tifffile +import scipy.misc as sm +import pandas as pd +from itertools import cycle +from io import StringIO + +__author__ = "Doga Gursoy, Francesco De Carlo" +__copyright__ = "Copyright (c) 2015-2016, UChicago Argonne, LLC." +__version__ = "0.1.0" +__docformat__ = "restructuredtext en" +__all__ = [ + "read_dx_meta", + "read_edf", + "read_hdf5", + "read_netcdf4", + "read_npy", + "read_spe", + "read_fits", + "read_tiff", + "read_tiff_stack", + "read_xrm", + "read_xrm_stack", + "read_aps_1id_metafile", + "read_txrm", + "read_hdf5_stack", + "read_file_list", +] + +logger = logging.getLogger(__name__) + + +def _check_import(modname): + try: + return __import__(modname) + except ImportError: + logger.warn(modname + " module not found") + return None + + +# Optional dependencies. +spefile = _check_import("spefile") +netCDF4 = _check_import("netCDF4") +EdfFile = _check_import("EdfFile") +astropy = _check_import("astropy") +olefile = _check_import("olefile") + +# FIXME: raise exception would make more sense, also not sure an extension check +# is very useful, unless we are automatically mapping an extension to a +# function. +def _check_read(fname): + known_extensions = [ + ".edf", + ".tiff", + ".tif", + ".h5", + ".hdf", + ".npy", + ".nc", + ".xrm", + ".txrm", + ".txm", + ".xmt", + ] + if not isinstance(fname, six.string_types): + logger.error("File name must be a string") + else: + if writer.get_extension(fname) not in known_extensions: + logger.error("Unknown file extension") + return os.path.abspath(fname) + + +def read_tiff(fname, slc=None): + """ + Read data from tiff file. 
+ + Parameters + ---------- + fname : str + String defining the path of file or file name. + slc : sequence of tuples, optional + Range of values for slicing data in each axis. + ((start_1, end_1, step_1), ... , (start_N, end_N, step_N)) + defines slicing parameters for each axis of the data matrix. + + Returns + ------- + ndarray + Output 2D image. + """ + fname = _check_read(fname) + try: + arr = tifffile.imread(fname, out="memmap") + except IOError: + logger.error("No such file or directory: %s", fname) + return False + arr = _slice_array(arr, slc) + _log_imported_data(fname, arr) + return arr + + +def read_tiff_stack(fname, ind, digit=None, slc=None): + """ + Read data from stack of tiff files in a folder. + + Parameters + ---------- + fname : str + One of the file names in the tiff stack. + ind : list of int + Indices of the files to read. + digit : int + (Deprecated) Number of digits used in indexing stacked files. + slc : sequence of tuples, optional + Range of values for slicing data in each axis. + ((start_1, end_1, step_1), ... , (start_N, end_N, step_N)) + defines slicing parameters for each axis of the data matrix. + + Returns + ------- + ndarray + Output 3D image. + """ + fname = _check_read(fname) + list_fname = _list_file_stack(fname, ind, digit) + + arr = _init_arr_from_stack(list_fname[0], len(ind), slc) + for m, fname in enumerate(list_fname): + arr[m] = read_tiff(fname, slc) + _log_imported_data(fname, arr) + return arr + + +def read_xrm(fname, slice_range=None): + """ + Read data from xrm file. + + Parameters + ---------- + fname : str + String defining the path of file or file name. + slice_range : sequence of tuples, optional + Range of values for slicing data in each axis. + ((start_1, end_1, step_1), ... , (start_N, end_N, step_N)) + defines slicing parameters for each axis of the data matrix. + + Returns + ------- + ndarray + Output 2D image. 
+ """ + fname = _check_read(fname) + try: + ole = olefile.OleFileIO(fname) + except IOError: + print("No such file or directory: %s", fname) + return False + + metadata = read_ole_metadata(ole) + + if slice_range is None: + slice_range = (slice(None), slice(None)) + else: + slice_range = _make_slice_object_a_tuple(slice_range) + + stream = ole.openstream("ImageData1/Image1") + data = stream.read() + + data_type = _get_ole_data_type(metadata) + data_type = data_type.newbyteorder("<") + + arr = np.reshape( + np.fromstring(data, data_type), + (metadata["image_width"], metadata["image_height"]), + )[slice_range] + + _log_imported_data(fname, arr) + + ole.close() + return arr, metadata + + +# Should slc just take over what ind is doing here? +def read_xrm_stack(fname, ind, slc=None): + """ + Read data from stack of xrm files in a folder. + + Parameters + ---------- + fname : str + One of the file names in the tiff stack. + ind : list of int + Indices of the files to read. + slc : sequence of tuples, optional + Range of values for slicing data in each axis. + ((start_1, end_1, step_1), ... , (start_N, end_N, step_N)) + defines slicing parameters for each axis of the data matrix. + + Returns + ------- + ndarray + Output 3D image. 
+ """ + fname = _check_read(fname) + list_fname = _list_file_stack(fname, ind) + + number_of_images = len(ind) + arr, metadata = _init_ole_arr_from_stack(list_fname[0], number_of_images, slc) + del metadata["thetas"][0] + del metadata["x_positions"][0] + del metadata["y_positions"][0] + del metadata["z_positions"][0] + + for m, fname in enumerate(list_fname): + arr[m], angle_metadata = read_xrm(fname, slc) + metadata["thetas"].append(angle_metadata["thetas"][0]) + metadata["x_positions"].append(angle_metadata["x_positions"][0]) + metadata["y_positions"].append(angle_metadata["y_positions"][0]) + metadata["z_positions"].append(angle_metadata["z_positions"][0]) + + _log_imported_data(fname, arr) + return arr, metadata + + +def read_aps_1id_metafile(metafn): + """ + Parse log file generated at APS 1-ID + + Parameters + ---------- + metafn : str + Path to metafile of the experiment + + Returns + ------- + dataframe + Metadata stored as Pandas DataFrame. + """ + # use pandas to organize metadata + with open(metafn) as f: + rawlines = f.readlines() + + # locate each layer + # - each layer much have a head start with "Beginning of tomography" + # - failed layer does not contain "End of the full scan" + scan_head_ln = [ + i for i, line in enumerate(rawlines) if "Beginning of tomography" in line + ] + [len(rawlines)] + layers_lns = list(zip(scan_head_ln[0:-1], scan_head_ln[1:])) + layers_isValid = [ + ("End of the full scan" in "".join(rawlines[lns[0] : lns[1]])) + for i, lns in enumerate(layers_lns) + ] + + # parse each layer into DataFrames + dfs = [] + for layerID, lns in enumerate(layers_lns): + # skip over the incomplete layer + if not layers_isValid[layerID]: + continue + + # init meta line as None + # NOTE: + # These meta string might or might not present in the meta file, + # depending on runtime settings + path = None + prefix = None + energy = None + image_type = None + tomo_metastr = None + + # prep for current layer + layer_rawlines = rawlines[lns[0] : lns[1]] + 
cycled_imgtypes = cycle( + [ + "pre_white", + "still", + "post_white", + "post_dark", + ] + ) + + # iterate through each line + for i in range(len(layer_rawlines)): + ln = layer_rawlines[i] + + # the format of the metadata file requires hard coding... + if "num nSeq" not in ln: + # layer meta? + # -- metastr seems important, so I keep the whole string for + # each individual img for now + # However, this is definitely not a good practice in the + # long run. + if ":" in ln: + entry_key = ln.split(":")[0] + entry = ":".join(ln.split(":")[1:]).strip() + if entry_key.lower() == "Path".lower(): + path = entry + elif entry_key.lower() == "Image prefix".lower(): + prefix = entry + elif entry_key.lower() == "Energy (keV)".lower(): + energy = float(entry) + elif entry_key.lower() == "New omega position".lower(): + image_type = next(cycled_imgtypes) + elif entry_key.lower() == "tomo_metastr".lower(): + tomo_metastr = entry + else: + continue + else: + # this is the start of an image meta info block + block_start = i + while True: + i = i + 1 + if layer_rawlines[i] == "\n": + block_end = i + break + # construct a dataframe from the block + # -- the date time column contains single white space, which + # makes it impossible to directly use white space as the + # delimenator. 
Here we replace all 2+ white space with + # tab so that later Pandas can easily identify each column + image_block = [ + re.sub(" +", "\t", line.strip()) + for line in layer_rawlines[block_start:block_end] + ] + + # construct the dataframe + df = pd.read_csv(StringIO("\n".join(image_block)), sep="\t") + # -- having layerID makes it easier to see what went wrong + # during the experiment by directly locating the corrupted + # layer + df["layerID"] = layerID + df["path"] = path + df["energy(kev)"] = energy + df["prefix"] = prefix + df["type"] = image_type + df["metastr"] = tomo_metastr + # now convert the time to datetime object + df["Date"] = pd.to_datetime( + df["Date"], + infer_datetime_format=True, + ) + + dfs.append(df) + + return pd.concat(dfs, ignore_index=True) + + +def read_txrm(file_name, slice_range=None): + """ + Read data from a .txrm file, a compilation of .xrm files. + + Parameters + ---------- + file_name : str + String defining the path of file or file name. + slice_range : sequence of tuples, optional + Range of values for slicing data in each axis. + ((start_1, end_1, step_1), ... , (start_N, end_N, step_N)) + defines slicing parameters for each axis of the data matrix. + + Returns + ------- + ndarray + Array of 2D images. + + dictionary + Dictionary of metadata. 
+ """ + file_name = _check_read(file_name) + try: + ole = olefile.OleFileIO(file_name) + except IOError: + print("No such file or directory: %s", file_name) + return False + + metadata = read_ole_metadata(ole) + + array_of_images = np.empty( + _shape_after_slice( + ( + metadata["number_of_images"], + metadata["image_height"], + metadata["image_width"], + ), + slice_range, + ), + dtype=_get_ole_data_type(metadata), + ) + + if slice_range is None: + slice_range = (slice(None), slice(None), slice(None)) + else: + slice_range = _make_slice_object_a_tuple(slice_range) + + for i, idx in enumerate( + range(*slice_range[0].indices(metadata["number_of_images"])) + ): + img_string = "ImageData{}/Image{}".format( + int(np.ceil((idx + 1) / 100.0)), int(idx + 1) + ) + array_of_images[i] = _read_ole_image(ole, img_string, metadata)[slice_range[1:]] + + # reference = metadata["reference"] + # if reference is not None: + # metadata["reference"] = reference[slice_range[1:]] + + _log_imported_data(file_name, array_of_images) + + ole.close() + return array_of_images, metadata + + +def read_txm(file_name, slice_range=None): + """ + Read data from a .txm file, the reconstruction file output + by Zeiss software. + + Parameters + ---------- + file_name : str + String defining the path of file or file name. + slice_range : sequence of tuples, optional + Range of values for slicing data in each axis. + ((start_1, end_1, step_1), ... , (start_N, end_N, step_N)) + defines slicing parameters for each axis of the data matrix. + + Returns + ------- + ndarray + Array of 2D images. + + dictionary + Dictionary of metadata. + """ + + return read_txrm(file_name, slice_range) + + +def read_ole_metadata(ole): + """ + Read metadata from an xradia OLE file (.xrm, .txrm, .txm). + + Parameters + ---------- + ole : OleFileIO instance + An ole file to read from. + + Returns + ------- + tuple + A tuple of image metadata. 
+ """ + + number_of_images = _read_ole_value(ole, "ImageInfo/NoOfImages", " 0: + dtype = "uint%s" % bitpix + elif bitpix <= -32: + dtype = "float%s" % -bitpix + else: + dtype = "int%s" % -bitpix + return dtype + + def _readBITPIX(path): + # astropy fits reader has a problem + # have to read BITPIX from the fits file directly + stream = open(path, "rb") + while True: + line = stream.read(80).decode("utf-8") + if line.startswith("BITPIX"): + value = line.split("/")[0].split("=")[1].strip() + value = int(value) + break + continue + stream.close() + return value + + from astropy.io import fits + + f = fits.open(fname) + arr = f[0].data + f.close() + if fixdtype: + dtype = _getDataType(fname) + if dtype: + arr = np.array(arr, dtype=dtype) + _log_imported_data(fname, arr) + return arr + + +def _slice_array(arr, slc): + """ + Perform slicing on ndarray. + + Parameters + ---------- + arr : ndarray + Input array to be sliced. + slc : sequence of tuples + Range of values for slicing data in each axis. + ((start_1, end_1, step_1), ... , (start_N, end_N, step_N)) + defines slicing parameters for each axis of the data matrix. + + Returns + ------- + ndarray + Sliced array. + """ + if slc is None: + logger.debug("No slicing applied to image") + return arr[:] + axis_slice = _make_slice_object_a_tuple(slc) + logger.debug("Data sliced according to: %s", axis_slice) + return arr[axis_slice] + + +def _shape_after_slice(shape, slc): + """ + Return the calculated shape of an array after it has been sliced. + Only handles basic slicing (not advanced slicing). + + Parameters + ---------- + shape : tuple of ints + Tuple of ints defining the ndarray shape + slc : tuple of slices + Object representing a slice on the array. Should be one slice per + dimension in shape. 
+ + """ + if slc is None: + return shape + new_shape = list(shape) + slc = _make_slice_object_a_tuple(slc) + for m, s in enumerate(slc): + # indicies will perform wrapping and such for the shape + start, stop, step = s.indices(shape[m]) + new_shape[m] = int(math.ceil((stop - start) / float(step))) + if new_shape[m] < 0: + new_shape[m] = 0 + return tuple(new_shape) + + +def _list_file_stack(fname, ind, digit=None): + """ + Return a stack of file names in a folder as a list. + + Parameters + ---------- + fname : str + String defining the path of file or file name. + ind : list of int + Indices of the files to read. + digit : int + Deprecated input for the number of digits in all indexes + of the stacked files. + """ + + if digit is not None: + warnings.warn( + ( + "The 'digit' argument is deprecated and no longer used." + " It may be removed completely in a later version." + ), + FutureWarning, + ) + + body = writer.get_body(fname) + body, digits = writer.remove_trailing_digits(body) + + ext = writer.get_extension(fname) + list_fname = [] + for m in ind: + counter_string = str(m).zfill(digits) + list_fname.append(body + counter_string + ext) + return list_fname + + +@contextmanager +def find_dataset_group(fname): + """ + Finds the group name containing the stack of projections datasets within + an ALS BL8.3.2 hdf5 file containing a stack of images + + Parameters + ---------- + fname : str + String defining the path of file or file name. 
+ + Returns + ------- + h5py.Group + """ + with h5py.File(fname, "r") as h5object: + yield _find_dataset_group(h5object) + + +def _find_dataset_group(h5object): + """ + Finds the group name containing the stack of projections datasets within + an ALS BL8.3.2 hdf5 file containing a stack of images + """ + + # Only one root key means only one dataset in BL8.3.2 current format + keys = list(h5object.keys()) + if len(keys): + if isinstance(h5object[keys[0]], h5py.Group): + group_keys = list(h5object[keys[0]].keys()) + if isinstance(h5object[keys[0]][group_keys[0]], h5py.Dataset): + return h5object[keys[0]] + else: + return _find_dataset_group(h5object[keys[0]]) + else: + raise Exception("HDF5 Group with dataset stack not found") + else: + raise Exception("HDF5 Group with dataset stack not found") + + +def _count_proj(group, dname, nproj, digit=4, inter_bright=None): + """ + Count the number of projections that have a specified name structure. + Used to count the number of brights or darks in ALS BL8.3.2 hdf5 files when + number is not present in metadata. + """ + + body = os.path.splitext(dname)[0] + body = "".join(body[:-digit]) + + regex = re.compile(".*(" + body + ").*") + count = len(list(filter(regex.match, list(group.keys())))) + + if inter_bright > 0: + count = count / (nproj / inter_bright + 2) + elif inter_bright == 0: + count = count / 2 + + return int(count) + + +def _map_loc(ind, loc): + """ + Does a linear mapping of the indices where brights where taken within the + full tomography to new indices of only those porjections which where read + The returned list of indices is used in normalize_nn function. 
+ """ + + loc = np.array(loc) + low, upp = ind[0], ind[-1] + buff = (loc[-1] - loc[0]) / len(loc) + min_loc = low - buff + max_loc = upp + buff + loc = np.intersect1d(loc[loc > min_loc], loc[loc < max_loc]) + new_upp = len(ind) - 1 + loc = (new_upp * (loc - low)) // (upp - low) + if loc[0] < 0: + loc[0] = 0 + + return np.ndarray.tolist(loc) + + +def _read_ole_struct(ole, label, struct_fmt): + """ + Reads the struct associated with label in an ole file + """ + value = None + if ole.exists(label): + stream = ole.openstream(label) + data = stream.read() + value = struct.unpack(struct_fmt, data) + return value + + +def _read_ole_value(ole, label, struct_fmt): + """ + Reads the value associated with label in an ole file + """ + value = _read_ole_struct(ole, label, struct_fmt) + if value is not None: + value = value[0] + return value + + +def _read_ole_arr(ole, label, struct_fmt): + """ + Reads the numpy array associated with label in an ole file + """ + arr = _read_ole_struct(ole, label, struct_fmt) + if arr is not None: + arr = np.array(arr) + return arr + + +def _read_ole_image(ole, label, metadata, datatype=None): + stream = ole.openstream(label) + data = stream.read() + data_type = _get_ole_data_type(metadata, datatype) + data_type = data_type.newbyteorder("<") + image = np.reshape( + np.frombuffer(data, data_type), + ( + metadata["image_height"], + metadata["image_width"], + ), + ) + return image + + +def read_hdf5_stack(h5group, dname, ind, digit=4, slc=None, out_ind=None): + """ + Read data from stacked datasets in a hdf5 file + + Parameters + ---------- + + fname : str + One of the dataset names in the dataset stack + + ind : list of int + Indices of the datasets to be read + + digit : int + (Deprecated) Number of digits indexing the stacked datasets + + slc : {sequence, int} + Range of values for slicing data. + ((start_1, end_1, step_1), ... 
, (start_N, end_N, step_N)) + defines slicing parameters for each axis of the data matrix + + out_ind : list of int, optional + Outer level indices for files with two levels of indexing. + i.e. [name_000_000.tif, name_000_001.tif, ..., name_000_lmn.tif, + name_001_lmn.tif, ..., ..., name_fgh_lmn.tif] + """ + + list_fname = _list_file_stack(dname, ind, digit) + + if out_ind is not None: + list_fname_ = [] + for name in list_fname: + fname = ( + writer.get_body(name).split("/")[-1] + + "_" + + digit * "0" + + writer.get_extension(name) + ) + list_fname_.extend(_list_file_stack(fname, out_ind, digit)) + list_fname = sorted(list_fname_, key=lambda x: str(x).split("_")[-1]) + + for m, image in enumerate(list_fname): + _arr = h5group[image] + _arr = _slice_array(_arr, slc) + if m == 0: + dx, dy, dz = _arr.shape + dx = len(list_fname) + arr = np.empty((dx, dy, dz), dtype=_arr.dtype) + arr[m] = _arr + + return arr + + +def read_file_list(file_list): + """ + Read data from stack of image files in a folder. 
+ + Parameters + ---------- + + file_list : list of str + List of file names to read, in order + """ + + f = file_list[0] + try: + readfunc = tifffile.imread + im = readfunc(f) + except ValueError: + readfunc = functools.partial(sm.imread, flatten=True) + im = readfunc(f) + + if len(im.shape) != 2: + raise ValueError("Only 2D images are supported in read_file_list") + + arr = np.zeros((len(file_list), im.shape[0], im.shape[1]), dtype=im.dtype) + + arr[0] = im + for i, fn in enumerate(file_list[1:]): + arr[i + 1] = readfunc(fn) + + return arr diff --git a/tomopyui/backend/util/metadata_io.py b/tomopyui/backend/util/metadata_io.py deleted file mode 100644 index 33e6723..0000000 --- a/tomopyui/backend/util/metadata_io.py +++ /dev/null @@ -1,80 +0,0 @@ -# https://stackoverflow.com/questions/51674222/how-to-make-json-dumps-in-python-ignore-a-non-serializable-field -import json -import os -import pandas as pd - - -def safe_serialize(obj, f): - default = lambda o: f"<>" - return json.dump(obj, f, default=default, indent=4) - - -def save_metadata(filename, metadata): - with open(filename, "w+") as f: - a = safe_serialize(metadata, f) - - -def load_metadata(filepath=None, filename=None, fullpath=None): - if fullpath is not None: - with open(fullpath) as f: - metadata = json.load(f) - else: - fullpath = os.path.abspath(os.path.join(filepath, filename)) - with open(fullpath) as f: - metadata = json.load(f) - return metadata - - -def metadata_to_DataFrame(metadata): - metadata_frame = {} - time, title = parse_printed_time(metadata["analysis_time"]) - extra_headers = ["Prj X Range", "Prj Y Range", "Start Angle", "End Angle", title] - metadata_frame["Headers"] = list(metadata["opts"].keys()) - metadata_frame["Headers"] = [ - metadata_frame["Headers"][i].replace("_", " ").title().replace("Num", "No.") - for i in range(len(metadata_frame["Headers"])) - ] - metadata_frame["Headers"] = metadata_frame["Headers"] + extra_headers - extra_values = [ - metadata["prj_range_x"], - 
metadata["prj_range_y"], - metadata["angle_start"], - metadata["angle_end"], - time, - ] - extra_values = [str(extra_values[i]) for i in range(len(extra_values))] - metadata_frame["Values"] = [ - str(metadata["opts"][key]) for key in metadata["opts"] - ] + extra_values - metadata_frame = { - metadata_frame["Headers"][i]: metadata_frame["Values"][i] - for i in range(len(metadata_frame["Headers"])) - } - sr = pd.Series(metadata_frame) - df = pd.DataFrame(sr).transpose() - s = df.style.hide_index() - s.set_table_styles( - [ - {"selector": "th.col_heading", "props": "text-align: center;"}, - {"selector": "th.col_heading.level0", "props": "font-size: 1.2em;"}, - {"selector": "td", "props": "text-align: center;" "font-size: 1.2em; "}, - ], - overwrite=False, - ) - return s - - -def parse_printed_time(timedict): - if timedict["hours"] < 1: - if timedict["minutes"] < 1: - time = timedict["seconds"] - title = "Time (s)" - else: - time = timedict["minutes"] - title = "Time (min)" - else: - time = timedict["hours"] - title = "Time (h)" - - time = f"{time:.1f}" - return time, title diff --git a/tomopyui/backend/util/padding.py b/tomopyui/backend/util/padding.py index 9b3912b..9f21323 100644 --- a/tomopyui/backend/util/padding.py +++ b/tomopyui/backend/util/padding.py @@ -2,13 +2,15 @@ # https://stackoverflow.com/questions/54567986/python-numpy-remove-empty-zeroes-border-of-3d-array + def pad_projections(prj, pad): npad = ((0, 0), ((pad[1]), pad[1]), (pad[0], pad[0])) prj = np.pad(prj, npad, mode="constant", constant_values=0) - return prj, pad + return prj + def trim_padding(prj): - + xs, ys, zs = np.where(np.absolute(prj) > 1e-7) minxs = np.min(xs) @@ -23,6 +25,27 @@ def trim_padding(prj): # not sure why +1 here. 
return result + +def trim_padding_wrt_shift(prj, sx, sy, init_padding): + + minxs = np.min(sx) + maxxs = np.max(sx) + minys = np.min(sy) + maxys = np.max(sy) + x_total = prj.shape[2] + y_total = prj.shape[1] + x_begin_init = init_padding[0] - 1 + x_begin_shift = int(np.floor(x_begin_init + minxs)) + y_begin_init = init_padding[1] - 1 + y_begin_shift = int(np.floor(y_begin_init + minys)) + x_end_init = x_total - init_padding[0] - 1 + x_end_shift = int(np.ceil(x_end_init + maxys)) + y_end_init = y_total - init_padding[1] - 1 + y_end_shift = int(np.ceil(y_end_init + maxys)) + result = prj[:, y_begin_shift : y_end_shift, x_begin_shift : x_end_shift] + + return result + # https://stackoverflow.com/questions/24806174/is-there-an-opposite-inverse-to-numpy-pad-function def unpad_rec_with_pad(rec, pad): @@ -33,4 +56,16 @@ def unpad_rec_with_pad(rec, pad): for c in npad: e = None if c[1] == 0 else -c[1] slices.append(slice(c[0], e)) - return rec[tuple(slices)] \ No newline at end of file + return rec[tuple(slices)] + + +def pad_to_make_same_size(images_to_pad, images): + to_pad_shape = images_to_pad.shape + reference_shape = images.shape + diffshape = [y - x for x, y in zip(to_pad_shape, reference_shape)] + diffshape = [ + [x / 2, x / 2] if x % 2 == 0 else [x / 2 + 0.5, x / 2 - 0.5] for x in diffshape + ] + pad = tuple([(int(x[0]), int(x[1])) for x in diffshape]) + images_padded = np.pad(images_to_pad, pad) + return images_padded diff --git a/tomopyui/backend/util/registration/_masked_phase_cross_correlation_cupy.py b/tomopyui/backend/util/registration/_masked_phase_cross_correlation_cupy.py index 038b416..0995d22 100644 --- a/tomopyui/backend/util/registration/_masked_phase_cross_correlation_cupy.py +++ b/tomopyui/backend/util/registration/_masked_phase_cross_correlation_cupy.py @@ -17,9 +17,9 @@ from .fft import fftmodule, next_fast_len -def _masked_phase_cross_correlation(reference_image, moving_image, - reference_mask, moving_mask=None, - overlap_ratio=0.3): +def 
_masked_phase_cross_correlation( + reference_image, moving_image, reference_mask, moving_mask=None, overlap_ratio=0.3 +): """Masked image translation registration by masked normalized cross-correlation. @@ -66,19 +66,24 @@ def _masked_phase_cross_correlation(reference_image, moving_image, if reference_image.shape != moving_image.shape: raise ValueError( "Input images have different shapes, moving_mask must " - "be explicitely set.") + "be explicitely set." + ) moving_mask = cp.array(reference_mask, dtype=bool, copy=True) # We need masks to be of the same size as their respective images - for (im, mask) in [(reference_image, reference_mask), - (moving_image, moving_mask)]: + for (im, mask) in [(reference_image, reference_mask), (moving_image, moving_mask)]: if im.shape != mask.shape: - raise ValueError( - "Image sizes must match their respective mask sizes.") - - xcorr = cross_correlate_masked(moving_image, reference_image, moving_mask, - reference_mask, axes=(0, 1), mode='full', - overlap_ratio=overlap_ratio) + raise ValueError("Image sizes must match their respective mask sizes.") + + xcorr = cross_correlate_masked( + moving_image, + reference_image, + moving_mask, + reference_mask, + axes=(0, 1), + mode="full", + overlap_ratio=overlap_ratio, + ) # Generalize to the average of multiple equal maxima maxima = cp.stack(cp.nonzero(xcorr == xcorr.max()), axis=1) @@ -87,16 +92,15 @@ def _masked_phase_cross_correlation(reference_image, moving_image, # The mismatch in size will impact the center location of the # cross-correlation - size_mismatch = [ - t - s for t, s in zip(moving_image.shape, reference_image.shape) - ] + size_mismatch = [t - s for t, s in zip(moving_image.shape, reference_image.shape)] size_mismatch = cp.asarray(size_mismatch) return -shifts + (size_mismatch / 2) -def cross_correlate_masked(arr1, arr2, m1, m2, mode='full', axes=(-2, -1), - overlap_ratio=0.3): +def cross_correlate_masked( + arr1, arr2, m1, m2, mode="full", axes=(-2, -1), overlap_ratio=0.3 
+): """ Masked normalized cross-correlation between arrays. @@ -150,7 +154,7 @@ def cross_correlate_masked(arr1, arr2, m1, m2, mode='full', axes=(-2, -1), Pattern Recognition, pp. 2918-2925 (2010). :DOI:`10.1109/CVPR.2010.5540032` """ - if mode not in {'full', 'same'}: + if mode not in {"full", "same"}: raise ValueError("Correlation mode '{}' is not valid.".format(mode)) if arr1.dtype.kind == "c" or arr2.dtype.kind == "c": @@ -167,11 +171,12 @@ def cross_correlate_masked(arr1, arr2, m1, m2, mode='full', axes=(-2, -1), # Array dimensions along non-transformation axes should be equal. all_axes = set(range(fixed_image.ndim)) - for axis in (all_axes - set(axes)): + for axis in all_axes - set(axes): if fixed_image.shape[axis] != moving_image.shape[axis]: raise ValueError( "Array shapes along non-transformation axes should be " - "equal, but dimensions along axis {a} are not".format(a=axis)) + "equal, but dimensions along axis {a} are not".format(a=axis) + ) # Determine final size along transformation axes # Note that it might be faster to compute Fourier transform in a slightly @@ -179,8 +184,7 @@ def cross_correlate_masked(arr1, arr2, m1, m2, mode='full', axes=(-2, -1), # we slice back to`final_shape` using `final_slice`. final_shape = list(arr1.shape) for axis in axes: - final_shape[axis] = fixed_image.shape[axis] + \ - moving_image.shape[axis] - 1 + final_shape[axis] = fixed_image.shape[axis] + moving_image.shape[axis] - 1 final_shape = tuple(final_shape) final_slice = tuple([slice(0, int(sz)) for sz in final_shape]) @@ -211,30 +215,30 @@ def cross_correlate_masked(arr1, arr2, m1, m2, mode='full', axes=(-2, -1), # Calculate overlap of masks at every point in the convolution. # Locations with high overlap should not be taken into account. 
- number_overlap_masked_px = cp.real( - ifft(rotated_moving_mask_fft * fixed_mask_fft) - ) + number_overlap_masked_px = cp.real(ifft(rotated_moving_mask_fft * fixed_mask_fft)) number_overlap_masked_px[:] = cp.around(number_overlap_masked_px) number_overlap_masked_px[:] = cp.fmax(number_overlap_masked_px, eps) masked_correlated_fixed_fft = ifft(rotated_moving_mask_fft * fixed_fft) - masked_correlated_rotated_moving_fft = ifft( - fixed_mask_fft * rotated_moving_fft) + masked_correlated_rotated_moving_fft = ifft(fixed_mask_fft * rotated_moving_fft) numerator = ifft(rotated_moving_fft * fixed_fft) - numerator -= masked_correlated_fixed_fft * \ - masked_correlated_rotated_moving_fft / number_overlap_masked_px + numerator -= ( + masked_correlated_fixed_fft + * masked_correlated_rotated_moving_fft + / number_overlap_masked_px + ) fixed_squared_fft = fft(cp.square(fixed_image)) fixed_denom = ifft(rotated_moving_mask_fft * fixed_squared_fft) - fixed_denom -= cp.square(masked_correlated_fixed_fft) / \ - number_overlap_masked_px + fixed_denom -= cp.square(masked_correlated_fixed_fft) / number_overlap_masked_px fixed_denom[:] = cp.fmax(fixed_denom, 0.0) rotated_moving_squared_fft = fft(cp.square(rotated_moving_image)) moving_denom = ifft(fixed_mask_fft * rotated_moving_squared_fft) - moving_denom -= cp.square(masked_correlated_rotated_moving_fft) / \ - number_overlap_masked_px + moving_denom -= ( + cp.square(masked_correlated_rotated_moving_fft) / number_overlap_masked_px + ) moving_denom[:] = cp.fmax(moving_denom, 0.0) @@ -245,9 +249,8 @@ def cross_correlate_masked(arr1, arr2, m1, m2, mode='full', axes=(-2, -1), denom = denom[final_slice] number_overlap_masked_px = number_overlap_masked_px[final_slice] - if mode == 'same': - _centering = partial(_centered, - newshape=fixed_image.shape, axes=axes) + if mode == "same": + _centering = partial(_centered, newshape=fixed_image.shape, axes=axes) denom = _centering(denom) numerator = _centering(numerator) number_overlap_masked_px = 
_centering(number_overlap_masked_px) @@ -267,8 +270,9 @@ def cross_correlate_masked(arr1, arr2, m1, m2, mode='full', axes=(-2, -1), cp.clip(out, a_min=-1, a_max=1, out=out) # Apply overlap ratio threshold - number_px_threshold = overlap_ratio * np.max(number_overlap_masked_px, - axis=axes, keepdims=True) + number_px_threshold = overlap_ratio * np.max( + number_overlap_masked_px, axis=axes, keepdims=True + ) out[number_overlap_masked_px < number_px_threshold] = 0.0 return out @@ -299,4 +303,4 @@ def _flip(arr, axes=None): for axis in axes: reverse[axis] = slice(None, None, -1) - return arr[tuple(reverse)] \ No newline at end of file + return arr[tuple(reverse)] diff --git a/tomopyui/backend/util/registration/_phase_cross_correlation_cucim.py b/tomopyui/backend/util/registration/_phase_cross_correlation_cucim.py index 43a4f5f..015c515 100644 --- a/tomopyui/backend/util/registration/_phase_cross_correlation_cucim.py +++ b/tomopyui/backend/util/registration/_phase_cross_correlation_cucim.py @@ -12,8 +12,7 @@ from ._masked_phase_cross_correlation_cupy import _masked_phase_cross_correlation -def _upsampled_dft(data, upsampled_region_size, - upsample_factor=1, axis_offsets=None): +def _upsampled_dft(data, upsampled_region_size, upsample_factor=1, axis_offsets=None): """ Upsampled DFT by matrix multiplication. @@ -54,23 +53,28 @@ def _upsampled_dft(data, upsampled_region_size, upsampled_region_size = [upsampled_region_size] * data.ndim else: if len(upsampled_region_size) != data.ndim: - raise ValueError("shape of upsampled region sizes must be equal " - "to input data's number of dimensions.") + raise ValueError( + "shape of upsampled region sizes must be equal " + "to input data's number of dimensions." 
+ ) if axis_offsets is None: axis_offsets = [0] * data.ndim else: if len(axis_offsets) != data.ndim: - raise ValueError("number of axis offsets must be equal to input " - "data's number of dimensions.") + raise ValueError( + "number of axis offsets must be equal to input " + "data's number of dimensions." + ) im2pi = 1j * 2 * np.pi dim_properties = list(zip(data.shape, upsampled_region_size, axis_offsets)) for (n_items, ups_size, ax_offset) in dim_properties[::-1]: - kernel = ((cp.arange(ups_size) - ax_offset)[:, None] - * fft.fftfreq(n_items, upsample_factor)) + kernel = (cp.arange(ups_size) - ax_offset)[:, None] * fft.fftfreq( + n_items, upsample_factor + ) kernel = cp.exp(-im2pi * kernel) # CuPy Backend: use kernel of same precision as the data kernel = kernel.astype(data.dtype, copy=False) @@ -107,16 +111,24 @@ def _compute_error(cross_correlation_max, src_amp, target_amp): target_amp : float The normalized average image intensity of the target image """ - error = 1.0 - cross_correlation_max * cross_correlation_max.conj() /\ - (src_amp * target_amp) + error = 1.0 - cross_correlation_max * cross_correlation_max.conj() / ( + src_amp * target_amp + ) return cp.sqrt(cp.abs(error)) -def phase_cross_correlation(reference_image, moving_image, *, - upsample_factor=1, space="real", - return_error=True, reference_mask=None, - moving_mask=None, overlap_ratio=0.3): +def phase_cross_correlation( + reference_image, + moving_image, + *, + upsample_factor=1, + space="real", + return_error=True, + reference_mask=None, + moving_mask=None, + overlap_ratio=0.3 +): """Efficient subpixel image translation registration by cross-correlation. 
This code gives the same precision as the FFT upsampled cross-correlation @@ -193,20 +205,20 @@ def phase_cross_correlation(reference_image, moving_image, *, :DOI:`10.1109/CVPR.2010.5540032` """ if (reference_mask is not None) or (moving_mask is not None): - return _masked_phase_cross_correlation(reference_image, moving_image, - reference_mask, moving_mask, - overlap_ratio) + return _masked_phase_cross_correlation( + reference_image, moving_image, reference_mask, moving_mask, overlap_ratio + ) # images must be the same shape if reference_image.shape != moving_image.shape: raise ValueError("images must be same shape") # assume complex data is already in Fourier space - if space.lower() == 'fourier': + if space.lower() == "fourier": src_freq = reference_image target_freq = moving_image # real data needs to be fft'd. - elif space.lower() == 'real': + elif space.lower() == "real": src_freq = fft.fftn(reference_image) target_freq = fft.fftn(moving_image) else: @@ -246,19 +258,21 @@ def phase_cross_correlation(reference_image, moving_image, *, upsample_factor = float(upsample_factor) # Matrix multiply DFT around the current shift estimate sample_region_offset = dftshift - shifts * upsample_factor - cross_correlation = _upsampled_dft(image_product.conj(), - upsampled_region_size, - upsample_factor, - sample_region_offset).conj() + cross_correlation = _upsampled_dft( + image_product.conj(), + upsampled_region_size, + upsample_factor, + sample_region_offset, + ).conj() # Locate maximum and map back to original pixel grid - maxima = cp.unravel_index(cp.argmax(cp.abs(cross_correlation)), - cross_correlation.shape) + maxima = cp.unravel_index( + cp.argmax(cp.abs(cross_correlation)), cross_correlation.shape + ) CCmax = cross_correlation[maxima] maxima = ( - cp.stack([m.astype(float_dtype, copy=False) for m in maxima]) - - dftshift + cp.stack([m.astype(float_dtype, copy=False) for m in maxima]) - dftshift ) shifts = shifts + maxima / upsample_factor @@ -286,9 +300,13 @@ def 
phase_cross_correlation(reference_image, moving_image, *, "keywords, eg: " "phase_cross_correlation(reference_image, moving_image, " "reference_mask=~np.isnan(reference_image), " - "moving_mask=~np.isnan(moving_image))") + "moving_mask=~np.isnan(moving_image))" + ) - return shifts, _compute_error(CCmax, src_amp, target_amp),\ - _compute_phasediff(CCmax) + return ( + shifts, + _compute_error(CCmax, src_amp, target_amp), + _compute_phasediff(CCmax), + ) else: - return shifts \ No newline at end of file + return shifts diff --git a/tomopyui/backend/util/registration/_phase_cross_correlation_cupy.py b/tomopyui/backend/util/registration/_phase_cross_correlation_cupy.py index 566e765..d78fc65 100644 --- a/tomopyui/backend/util/registration/_phase_cross_correlation_cupy.py +++ b/tomopyui/backend/util/registration/_phase_cross_correlation_cupy.py @@ -52,7 +52,9 @@ def _upsampled_dft(data, upsampled_region_size, upsample_factor=1, axis_offsets= # if people pass in an integer, expand it to a list of equal-sized sections if not hasattr(upsampled_region_size, "__iter__"): - upsampled_region_size = [upsampled_region_size,] * data.ndim + upsampled_region_size = [ + upsampled_region_size, + ] * data.ndim upsampled_region_size = cp.asarray(upsampled_region_size, dtype=cp.float64) else: if len(upsampled_region_size) != data.ndim: @@ -61,7 +63,9 @@ def _upsampled_dft(data, upsampled_region_size, upsample_factor=1, axis_offsets= "to input data's number of dimensions." 
) if axis_offsets is None: - axis_offsets = [0,] * data.ndim + axis_offsets = [ + 0, + ] * data.ndim else: if len(axis_offsets) != data.ndim: raise ValueError( diff --git a/tomopyui/backend/util/registration/fft_cucim.py b/tomopyui/backend/util/registration/fft_cucim.py index ba7bc25..1c503a9 100644 --- a/tomopyui/backend/util/registration/fft_cucim.py +++ b/tomopyui/backend/util/registration/fft_cucim.py @@ -10,4 +10,4 @@ fftmodule = cupyx.scipy.fft -__all__ = ['fftmodule', 'next_fast_len'] \ No newline at end of file +__all__ = ["fftmodule", "next_fast_len"] diff --git a/tomopyui/backend/util/save_metadata.py b/tomopyui/backend/util/save_metadata.py deleted file mode 100644 index d99e525..0000000 --- a/tomopyui/backend/util/save_metadata.py +++ /dev/null @@ -1,12 +0,0 @@ -# https://stackoverflow.com/questions/51674222/how-to-make-json-dumps-in-python-ignore-a-non-serializable-field -import json - - -def safe_serialize(obj, f): - default = lambda o: f"<>" - return json.dump(obj, f, default=default, indent=4) - - -def save_metadata(filename, metadata): - with open(filename, "w+") as f: - a = safe_serialize(metadata, f) diff --git a/tomopyui/tomocupy/archive/align_joint_old.py b/tomopyui/tomocupy/archive/align_joint_old.py deleted file mode 100644 index 08a2ad6..0000000 --- a/tomopyui/tomocupy/archive/align_joint_old.py +++ /dev/null @@ -1,463 +0,0 @@ -from tomopy.prep.alignment import scale as scale_tomo -import tomocupy.recon as tcrecon - - -def align_joint(TomoAlign, Align=None): - - # ensure it only runs on 1 thread for CUDA - os.environ["TOMOPY_PYTHON_THREADS"] = "1" - # Initialize variables from metadata for ease of reading: - num_iter = TomoAlign.metadata["opts"]["num_iter"] - init_tomo_shape = TomoAlign.prj_for_alignment.shape - downsample = TomoAlign.metadata["opts"]["downsample"] - pad = TomoAlign.metadata["opts"]["pad"] - method_str = list(TomoAlign.metadata["methods"].keys())[0] - upsample_factor = TomoAlign.metadata["opts"]["upsample_factor"] - 
num_batches = TomoAlign.metadata["opts"]["batch_size"] # change to num_batches - center = TomoAlign.metadata["center"] - - # Needs scaling for skimage float operations. - TomoAlign.prj_for_alignment, scl = scale_tomo(TomoAlign.prj_for_alignment) - - # Initialization of reconstruction dataset - tomo_shape = TomoAlign.prj_for_alignment.shape - TomoAlign.recon = np.empty( - (tomo_shape[1], tomo_shape[2], tomo_shape[2]), dtype=np.float32 - ) - - # add progress bar for method. roughly a full-loop progress bar. - # with TomoAlign.method_bar_cm: - # method_bar = tqdm( - # total=num_iter, - # desc=options["method"], - # display=True, - # ) - - # Initialize shift arrays - TomoAlign.sx = np.zeros((init_tomo_shape[0])) - TomoAlign.sy = np.zeros((init_tomo_shape[0])) - TomoAlign.conv = np.zeros((num_iter)) - - # start iterative alignment - for n in range(num_iter): - _rec = TomoAlign.recon - - if TomoAlign.metadata["methods"]["SIRT_CUDA"]["Faster"]: - TomoAlign.recon = tcrecon.recon_sirt_3D( - TomoAlign.prj_for_alignment, - TomoAlign.tomo.theta, - num_iter=1, - rec=_rec, - center=center) - elif TomoAlign.metadata["methods"]["SIRT_CUDA"]["Fastest"]: - TomoAlign.recon = tcrecon.recon_sirt_3D_allgpu( - TomoAlign.prj_for_alignment, - TomoAlign.tomo.theta, - num_iter=2, - rec=_rec, - center=center) - else: - # Options go into kwargs which go into recon() - kwargs = {} - options = { - "proj_type": "cuda", - "method": method_str, - "num_iter": 1 - } - kwargs["options"] = options - - TomoAlign.recon = algorithm.recon( - TomoAlign.prj_for_alignment, - TomoAlign.tomo.theta, - algorithm=wrappers.astra, - init_recon=_rec, - center=center, - ncore=None, - **kwargs, - ) - # update progress bar - # method_bar.update() - - # break up reconstruction into batches along z axis - TomoAlign.recon = np.array_split(TomoAlign.recon, num_batches, axis=0) - # may not need a copy. 
- _rec = TomoAlign.recon.copy() - - # initialize simulated projection cpu array - sim = [] - - # begin simulating projections using astra. - # this could probably be made more efficient, right now I am not - # certain if I need to be deleting every time. - - with TomoAlign.output1_cm: - TomoAlign.output1_cm.clear_output() - simulate_projections(_rec, sim, center, TomoAlign.tomo.theta) - # del _rec - sim = np.concatenate(sim, axis=1) - - # only flip the simulated datasets if using normal tomopy algorithm - # can remove if it is flipped in the algorithm - if ( - TomoAlign.metadata["methods"]["SIRT_CUDA"]["SIRT Plugin-Faster"] == False - and TomoAlign.metadata["methods"]["SIRT_CUDA"]["SIRT 3D-Fastest"] == False - ): - sim = np.flip(sim, axis=0) - - # Cross correlation - shift_cpu = [] - batch_cross_correlation(TomoAlign.prj_for_alignment, - sim, shift_cpu, - num_batches, upsample_factor, subset_correlation=False, blur=False, pad=pad_ds) - TomoAlign.shift = np.concatenate(shift_cpu, axis=1) - - # Shifting - (TomoAlign.prj_for_alignment, - TomoAlign.sx, - TomoAlign.sy, - TomoAlign.shift, - err, - pad_ds, - center) = warp_prj_shift_cp( - TomoAlign.prj_for_alignment, - TomoAlign.sx, - TomoAlign.sy, - TomoAlign.shift, - num_batches, - pad_ds, - center, - downsample_factor=downsample_factor - ) - TomoAlign.conv[n] = np.linalg.norm(err) - with TomoAlign.output2_cm: - TomoAlign.output2_cm.clear_output(wait=True) - TomoAlign.plotIm(sim) - TomoAlign.plotSxSy(downsample_factor) - print(f"Error = {np.linalg.norm(err):3.3f}.") - - TomoAlign.recon = np.concatenate(TomoAlign.recon, axis=0) - mempool = cp.get_default_memory_pool() - mempool.free_all_blocks() - - # TomoAlign.recon = np.concatenate(TomoAlign.recon, axis=0) - # Re-normalize data - # method_bar.close() - TomoAlign.prj_for_alignment *= scl - TomoAlign.recon = circ_mask(TomoAlign.recon, 0) - if downsample: - TomoAlign.sx = TomoAlign.sx / downsample_factor - TomoAlign.sy = TomoAlign.sy / downsample_factor - TomoAlign.shift 
= TomoAlign.shift / downsample_factor - - pad = tuple([x / downsample_factor for x in pad_ds]) - # make new dataset and pad/shift it for the next round - new_prj_imgs = deepcopy(TomoAlign.tomo.prj_imgs) - new_prj_imgs, pad = pad_projections(new_prj_imgs, pad, 1) - new_prj_imgs = warp_prj_cp(new_prj_imgs, TomoAlign.sx, TomoAlign.sy, num_batches, pad, use_corr_prj_gpu=False) - new_prj_imgs = trim_padding(new_prj_imgs) - TomoAlign.tomo = td.TomoData( - prj_imgs=new_prj_imgs, metadata=TomoAlign.metadata["importmetadata"]["tomo"] - ) - return TomoAlign - -def plotIm(TomoAlign, sim, projection_num=50): - fig = plt.figure(figsize=(8, 8)) - ax1 = plt.subplot(1, 2, 1) - ax2 = plt.subplot(1, 2, 2) - ax1.imshow(TomoAlign.prj_for_alignment[projection_num], cmap="gray") - ax1.set_axis_off() - ax1.set_title("Projection Image") - ax2.imshow(sim[projection_num], cmap="gray") - ax2.set_axis_off() - ax2.set_title("Re-projected Image") - plt.show() - -def plotSxSy(TomoAlign, downsample_factor): - plotrange = range(TomoAlign.prj_for_alignment.shape[0]) - fig = plt.figure(figsize=(8, 8)) - ax1 = plt.subplot(2, 1, 1) - ax2 = plt.subplot(2, 1, 2) - ax1.set(xlabel= "Projection number",ylabel="Pixel shift (not downsampled)") - ax1.plot(plotrange, TomoAlign.sx/downsample_factor) - ax1.set_title("Sx") - ax2.plot(plotrange, TomoAlign.sy/downsample_factor) - ax2.set_title("Sy") - ax2.set(xlabel= "Projection number",ylabel="Pixel shift (not downsampled)") - plt.show() - - - - - - -def transform_parallel(prj, sx, sy, shift, metadata): -num_theta = prj.shape[0] -err = np.zeros((num_theta + 1, 1)) -shift_y_condition = ( - metadata["opts"]["pad"][1] * metadata["opts"]["downsample_factor"] -) -shift_x_condition = ( - metadata["opts"]["pad"][0] * metadata["opts"]["downsample_factor"] -) - -def transform_algorithm(prj, shift, sx, sy, m): - shiftm = shift[:, m] - # don't let it shift if the value is larger than padding - if ( - np.absolute(sx[m] + shiftm[1]) < shift_x_condition - and np.absolute(sy[m] 
+ shiftm[0]) < shift_y_condition - ): - sx[m] += shiftm[1] - sy[m] += shiftm[0] - err[m] = np.sqrt(shiftm[0] * shiftm[0] + shiftm[1] * shiftm[1]) - - # similarity transform shifts in (x, y) - # tform = transform.SimilarityTransform(translation=(shiftm[1], shiftm[0])) - # prj[m] = transform.warp(prj[m], tform, order=5) - - # found that ndi is much faster than the above warp - # uses opposite convention - shift_tuple = (shiftm[0], shiftm[1]) - shift_tuple = tuple([-1*x for x in shift_tuple]) - prj[m] = ndi.shift(prj[m], shift_tuple, order=5) - -Parallel(n_jobs=-1, require="sharedmem")( - delayed(transform_algorithm)(prj, shift, sx, sy, m) - for m in range(num_theta) - # for m in tnrange(num_theta, desc="Transformation", leave=True) -) -return prj, sx, sy, err - - -def warp_projections(prj, sx, sy, metadata): -num_theta = prj.shape[0] -err = np.zeros((num_theta + 1, 1)) -shift_y_condition = ( - metadata["opts"]["pad"][1] -) -shift_x_condition = ( - metadata["opts"]["pad"][0] -) - -def transform_algorithm_warponly(prj, sx, sy, m): - # don't let it shift if the value is larger than padding - if ( - np.absolute(sx[m]) < shift_x_condition - and np.absolute(sy[m]) < shift_y_condition - ): - # similarity transform shifts in (x, y) - # see above note for ndi switch - # tform = transform.SimilarityTransform(translation=(sx[m], sy[m])) - # prj[m] = transform.warp(prj[m], tform, order=5) - - shift_tuple = (sy[m], sx[m]) - shift_tuple = tuple([-1*x for x in shift_tuple]) - prj[m] = ndi.shift(prj[m], shift_tuple, order=5) - -Parallel(n_jobs=-1, require="sharedmem")( - delayed(transform_algorithm_warponly)(prj, sx, sy, m) - for m in range(num_theta) - # for m in tnrange(num_theta, desc="Transformation", leave=True) -) -return prj - - -def init_new_from_prior(prior_tomoalign, metadata): -prj_imgs = deepcopy(prior_tomoalign.tomo.prj_imgs) -new_tomo = td.TomoData( - prj_imgs=prj_imgs, metadata=metadata["importmetadata"]["tomo"] -) -new_align_object = TomoAlign( - new_tomo, - 
metadata, - alignment_wd=prior_tomoalign.alignment_wd, - alignment_wd_child=prior_tomoalign.alignment_wd_child, -) -return new_align_object - - -def trim_padding(prj): -# https://stackoverflow.com/questions/54567986/python-numpy-remove-empty-zeroes-border-of-3d-array -xs, ys, zs = np.where(prj > 1e-7) - -minxs = np.min(xs) -maxxs = np.max(xs) -minys = np.min(ys) -maxys = np.max(ys) -minzs = np.min(zs) -maxzs = np.max(zs) - -# extract cube with extreme limits of where are the values != 0 -result = prj[minxs : maxxs + 1, minys : maxys + 1, minzs : maxzs + 1] -# not sure why +1 here. - -return result - - -def simulate_projections(rec, sim, center, theta): -for batch in range(len(rec)): -# for batch in tnrange(len(rec), desc="Re-projection", leave=True): - _rec = rec[batch] - vol_geom = astra.create_vol_geom( - _rec.shape[1], _rec.shape[1], _rec.shape[0] - ) - phantom_id = astra.data3d.create("-vol", vol_geom, data=_rec) - proj_geom = astra.create_proj_geom( - "parallel3d", - 1, - 1, - _rec.shape[0], - _rec.shape[1], - theta, - ) - if center is not None: - center_shift = -(center - _rec.shape[1]/2) - proj_geom = astra.geom_postalignment(proj_geom, (center_shift,)) - projections_id, _sim = astra.creators.create_sino3d_gpu( - phantom_id, proj_geom, vol_geom - ) - _sim = _sim.swapaxes(0, 1) - sim.append(_sim) - astra.data3d.delete(projections_id) - astra.data3d.delete(phantom_id) - -def batch_cross_correlation(prj, sim, shift_cpu, num_batches, upsample_factor, - blur=True, rin=0.5, rout=0.8, subset_correlation=False, - mask_sim=True, pad=(0,0)): -# TODO: the sign convention for shifting is bad here. -# To fix this, change to -# shift_gpu = phase_cross_correlation(_sim_gpu, _prj_gpu...) -# convention right now: -# if _sim is down and to the right, the shift tuple will be (-, -) -# before going positive. -# split into arrays for batch. 
-_prj = np.array_split(prj, num_batches, axis=0) -_sim = np.array_split(sim, num_batches, axis=0) - -for batch in range(len(_prj)): -# for batch in tnrange(len(_prj), desc="Cross-correlation", leave=True): - # projection images have been shifted. mask also shifts. - # apply the "moving" mask to the simulated projections - # simulated projections have data outside of the mask. - if subset_correlation: - _prj_gpu = cp.array(_prj[batch] - [ - :, - 2*pad[1]:-2*pad[1]:1, - 2*pad[0]:-2*pad[0]:1 - ], - dtype=cp.float32 - ) - _sim_gpu = cp.array(_sim[batch] - [ - :, - 2*pad[1]:-2*pad[1]:1, - 2*pad[0]:-2*pad[0]:1 - ], - dtype=cp.float32 - ) - else: - _prj_gpu = cp.array(_prj[batch], dtype=cp.float32) - _sim_gpu = cp.array(_sim[batch], dtype=cp.float32) - - if mask_sim: - _sim_gpu = cp.where(_prj_gpu < 1e-7, 0, _sim_gpu) - - if blur: - _prj_gpu = blur_edges_cp(_prj_gpu, rin, rout) - _sim_gpu = blur_edges_cp(_sim_gpu, rin, rout) - - # e.g. lets say sim is (-50, 0) wrt prj. This would correspond to - # a shift of [+50, 0] - # In the warping section, we have to now warp prj by (-50, 0), so the - # SAME sign of the shift value given here. - shift_gpu = phase_cross_correlation( - _sim_gpu, - _prj_gpu, - upsample_factor=upsample_factor, - return_error=False, - ) - shift_cpu.append(cp.asnumpy(shift_gpu)) -# shift_cpu = np.concatenate(shift_cpu, axis=1) - -def blur_edges_cp(prj, low=0, high=0.8): -""" -Blurs the edge of the projection images using cupy. - -Parameters ----------- -prj : ndarray - 3D stack of projection images. The first dimension - is projection axis, second and third dimensions are - the x- and y-axes of the projection image, respectively. -low : scalar, optional - Min ratio of the blurring frame to the image size. -high : scalar, optional - Max ratio of the blurring frame to the image size. - -Returns -------- -ndarray - Edge-blurred 3D stack of projection images. 
-""" -if type(prj) is np.ndarray: - prj_gpu = cp.array(prj, dtype=cp.float32) -else: - prj_gpu = prj -dx, dy, dz = prj_gpu.shape -rows, cols = cp.mgrid[:dy, :dz] -rad = cp.sqrt((rows - dy / 2) ** 2 + (cols - dz / 2) ** 2) -mask = cp.zeros((dy, dz)) -rmin, rmax = low * rad.max(), high * rad.max() -mask[rad < rmin] = 1 -mask[rad > rmax] = 0 -zone = cp.logical_and(rad >= rmin, rad <= rmax) -mask[zone] = (rmax - rad[zone]) / (rmax - rmin) -prj_gpu *= mask -return prj_gpu - - - - - - - - - # warning I don't think I fixed sign convention here. - def transform_parallel(prj, sx, sy, shift, metadata): - num_theta = prj.shape[0] - err = np.zeros((num_theta + 1, 1)) - shift_y_condition = ( - metadata["opts"]["pad"][1] * metadata["opts"]["downsample_factor"] - ) - shift_x_condition = ( - metadata["opts"]["pad"][0] * metadata["opts"]["downsample_factor"] - ) - - def transform_algorithm(prj, shift, sx, sy, m): - shiftm = shift[:, m] - # don't let it shift if the value is larger than padding - if ( - np.absolute(sx[m] + shiftm[1]) < shift_x_condition - and np.absolute(sy[m] + shiftm[0]) < shift_y_condition - ): - sx[m] += shiftm[1] - sy[m] += shiftm[0] - err[m] = np.sqrt(shiftm[0] * shiftm[0] + shiftm[1] * shiftm[1]) - - # similarity transform shifts in (x, y) - # tform = transform.SimilarityTransform(translation=(shiftm[1], shiftm[0])) - # prj[m] = transform.warp(prj[m], tform, order=5) - - # found that ndi is much faster than the above warp - # uses opposite convention - shift_tuple = (shiftm[0], shiftm[1]) - shift_tuple = tuple([-1*x for x in shift_tuple]) - prj[m] = ndi.shift(prj[m], shift_tuple, order=5) - - Parallel(n_jobs=-1, require="sharedmem")( - delayed(transform_algorithm)(prj, shift, sx, sy, m) - for m in range(num_theta) - # for m in tnrange(num_theta, desc="Transformation", leave=True) - ) - return prj, sx, sy, err \ No newline at end of file diff --git a/tomopyui/tomocupy/prep/alignment.py b/tomopyui/tomocupy/prep/alignment.py index 334d6df..a4f3c0e 100644 --- 
a/tomopyui/tomocupy/prep/alignment.py +++ b/tomopyui/tomocupy/prep/alignment.py @@ -1,55 +1,59 @@ -#!/usr/bin/env python +import astra +import os +import bqplot as bq +import cupy as cp +import numpy as np +import matplotlib.pyplot as plt +import tomopyui.tomocupy.recon.algorithm as tomocupy_algorithm from tomopy.misc.corr import circ_mask from tomopy.recon import wrappers from cupyx.scipy import ndimage as ndi_cp -from tomopyui.backend.util.registration._phase_cross_correlation_cupy import phase_cross_correlation +from tomopyui.backend.util.registration._phase_cross_correlation_cupy import ( + phase_cross_correlation, +) from tomopy.prep.alignment import scale as scale_tomo from tomopy.recon import algorithm as tomopy_algorithm from bqplot_image_gl import ImageGL from ipywidgets import * from tomopyui.backend.util.padding import * -import astra -import os -import bqplot as bq -import cupy as cp -import numpy as np -import matplotlib.pyplot as plt -import tomopyui.tomocupy.recon.algorithm as tomocupy_algorithm - -def align_joint(TomoAlign): +def align_joint(RunAlign): # ensure it only runs on 1 thread for CUDA os.environ["TOMOPY_PYTHON_THREADS"] = "1" # Initialize variables from metadata for ease of reading: - init_tomo_shape = TomoAlign.prjs.shape - num_iter = TomoAlign.num_iter - downsample = TomoAlign.downsample - pad = TomoAlign.pad - method_str = list(TomoAlign.metadata["methods"].keys())[0] + init_tomo_shape = RunAlign.prjs.shape + num_iter = RunAlign.num_iter + downsample = RunAlign.downsample + pad = RunAlign.pad + pad_ds = RunAlign.pad_ds + method_str = list(RunAlign.metadata.metadata["methods"].keys())[0] if method_str == "MLEM_CUDA": method_str = "EM_CUDA" - upsample_factor = TomoAlign.upsample_factor - num_batches = TomoAlign.num_batches - center = TomoAlign.center + upsample_factor = RunAlign.upsample_factor + num_batches = RunAlign.num_batches + center = RunAlign.center + pre_alignment_iters = RunAlign.pre_alignment_iters projection_num = 50 # default 
to 50 now, TODO: can make an option # Needs scaling for skimage float operations - TomoAlign.prjs, scl = scale_tomo(TomoAlign.prjs) + RunAlign.prjs, scl = scale_tomo(RunAlign.prjs) # Initialization of reconstruction dataset - tomo_shape = TomoAlign.prjs.shape - TomoAlign.recon = np.mean(TomoAlign.prjs) * np.empty( + tomo_shape = RunAlign.prjs.shape + RunAlign.recon = np.mean(RunAlign.prjs) * np.empty( (tomo_shape[1], tomo_shape[2], tomo_shape[2]), dtype=np.float32 ) # Initialize shift/convergence - TomoAlign.sx = np.zeros((tomo_shape[0])) - TomoAlign.sy = np.zeros((tomo_shape[0])) - TomoAlign.conv = np.zeros((num_iter)) + RunAlign.sx = np.zeros((tomo_shape[0])) + RunAlign.sy = np.zeros((tomo_shape[0])) + RunAlign.conv = np.zeros((num_iter)) + subset_x = RunAlign.subset_x + subset_y = RunAlign.subset_y # Initialize projection images plot scale_x = bq.LinearScale(min=0, max=1) @@ -63,17 +67,19 @@ def align_joint(TomoAlign): "x": scale_x, "y": scale_y, "image": bq.ColorScale( - min=float(np.min(TomoAlign.prjs[projection_num])), - max=float(np.max(TomoAlign.prjs[projection_num])), + min=float(np.min(RunAlign.prjs[projection_num])), + max=float(np.max(RunAlign.prjs[projection_num])), scheme="viridis", ), } image_projection = ImageGL( - image=TomoAlign.prjs[projection_num], scales=scales_image, + image=RunAlign.prjs[projection_num], + scales=scales_image, ) image_simulated = ImageGL( - image=np.zeros_like(TomoAlign.prjs[projection_num]), scales=scales_image, + image=np.zeros_like(RunAlign.prjs[projection_num]), + scales=scales_image, ) projection_fig.marks = (image_projection,) @@ -84,8 +90,8 @@ def align_joint(TomoAlign): simulated_fig.layout.width = "600px" simulated_fig.layout.height = "600px" simulated_fig.title = f"Re-projected Image {50}" - with TomoAlign.plot_output1: - TomoAlign.plot_output1.clear_output(wait=True) + with RunAlign.plot_output1: + RunAlign.plot_output1.clear_output(wait=True) display( HBox( [projection_fig, simulated_fig], @@ -100,8 +106,8 @@ def 
align_joint(TomoAlign): # Initialize Sx, Sy plot xs = bq.LinearScale() ys = bq.LinearScale() - x = range(TomoAlign.prjs.shape[0]) - y = [TomoAlign.sx, TomoAlign.sy] + x = range(RunAlign.prjs.shape[0]) + y = [RunAlign.sx, RunAlign.sy] line = bq.Lines( x=x, y=y, @@ -125,7 +131,7 @@ def align_joint(TomoAlign): xs_conv = bq.LinearScale(min=0) ys_conv = bq.LinearScale() x_conv = [0] - y_conv = [TomoAlign.conv[0]] + y_conv = [RunAlign.conv[0]] line_conv = bq.Lines( x=x_conv, y=y_conv, @@ -147,8 +153,8 @@ def align_joint(TomoAlign): marks=[line_conv], axes=[xax_conv, yax_conv], animation_duration=1000 ) fig_conv.layout.width = "600px" - with TomoAlign.plot_output2: - TomoAlign.plot_output2.clear_output() + with RunAlign.plot_output2: + RunAlign.plot_output2.clear_output() display( HBox( [fig_SxSy, fig_conv], @@ -163,34 +169,38 @@ def align_joint(TomoAlign): # Start alignment for n in range(num_iter): + if n == 0: + recon_iterations = pre_alignment_iters + else: + recon_iterations = 1 # for progress bars - TomoAlign.Align.progress_shifting.value = 0 - TomoAlign.Align.progress_reprj.value = 0 - TomoAlign.Align.progress_phase_cross_corr.value = 0 - _rec = TomoAlign.recon + RunAlign.analysis_parent.progress_shifting.value = 0 + RunAlign.analysis_parent.progress_reprj.value = 0 + RunAlign.analysis_parent.progress_phase_cross_corr.value = 0 + _rec = RunAlign.recon # TODO: handle reconstruction-type parsing elsewhere if method_str == "SIRT_Plugin": - TomoAlign.recon = tomocupy_algorithm.recon_sirt_plugin( - TomoAlign.prjs, - TomoAlign.tomo.theta, - num_iter=1, + RunAlign.recon = tomocupy_algorithm.recon_sirt_plugin( + RunAlign.prjs, + RunAlign.angles_rad, + num_iter=recon_iterations, rec=_rec, center=center, ) elif method_str == "SIRT_3D": - TomoAlign.recon = tomocupy_algorithm.recon_sirt_3D( - TomoAlign.prjs, - TomoAlign.tomo.theta, - num_iter=1, + RunAlign.recon = tomocupy_algorithm.recon_sirt_3D( + RunAlign.prjs, + RunAlign.angles_rad, + num_iter=recon_iterations, rec=_rec, 
center=center, ) elif method_str == "CGLS_3D": - TomoAlign.recon = tomocupy_algorithm.recon_cgls_3D_allgpu( - TomoAlign.prjs, - TomoAlign.tomo.theta, - num_iter=1, + RunAlign.recon = tomocupy_algorithm.recon_cgls_3D_allgpu( + RunAlign.prjs, + RunAlign.angles_rad, + num_iter=recon_iterations, rec=_rec, center=center, ) @@ -200,26 +210,36 @@ def align_joint(TomoAlign): options = { "proj_type": "cuda", "method": method_str, - "num_iter": 1, + "num_iter": recon_iterations, "extra_options": {"MinConstraint": 0}, } kwargs["options"] = options - TomoAlign.recon = tomopy_algorithm.recon( - TomoAlign.prjs, - TomoAlign.tomo.theta, - algorithm=wrappers.astra, - init_recon=_rec, - center=center, - ncore=1, - **kwargs, - ) - - TomoAlign.recon[np.isnan(TomoAlign.recon)] = 0 - TomoAlign.Align.progress_total.value = n + 1 + if n == 0: + RunAlign.recon = tomopy_algorithm.recon( + RunAlign.prjs, + RunAlign.angles_rad, + algorithm=wrappers.astra, + center=center, + ncore=1, + **kwargs, + ) + else: + RunAlign.recon = tomopy_algorithm.recon( + RunAlign.prjs, + RunAlign.angles_rad, + algorithm=wrappers.astra, + init_recon=_rec, + center=center, + ncore=1, + **kwargs, + ) + + RunAlign.recon[np.isnan(RunAlign.recon)] = 0 + RunAlign.analysis_parent.progress_total.value = n + 1 # break up reconstruction into batches along z axis - TomoAlign.recon = np.array_split(TomoAlign.recon, num_batches, axis=0) + RunAlign.recon = np.array_split(RunAlign.recon, num_batches, axis=0) # may not need a copy. 
- _rec = TomoAlign.recon.copy() + _rec = RunAlign.recon.copy() # initialize simulated projection cpu array sim = [] @@ -232,8 +252,8 @@ def align_joint(TomoAlign): _rec, sim, center, - TomoAlign.tomo.theta, - progress=TomoAlign.Align.progress_reprj, + RunAlign.angles_rad, + progress=RunAlign.analysis_parent.progress_reprj, ) sim = np.concatenate(sim, axis=1) # only flip the simulated datasets if using normal tomopy algorithm @@ -246,63 +266,65 @@ def align_joint(TomoAlign): pass else: sim = np.flip(sim, axis=0) + # sim = np.flip(sim, axis=2) # Cross correlation shift_cpu = [] batch_cross_correlation( - TomoAlign.prjs, + RunAlign.prjs, sim, shift_cpu, num_batches, upsample_factor, - subset_correlation=False, - blur=False, - pad=TomoAlign.pad_ds, - progress=TomoAlign.Align.progress_phase_cross_corr, + subset_correlation=RunAlign.use_subset_correlation, + subset_x=subset_x, + subset_y=subset_y, + blur=True, + pad=RunAlign.pad_ds, + progress=RunAlign.analysis_parent.progress_phase_cross_corr, ) - TomoAlign.shift = np.concatenate(shift_cpu, axis=1) - + RunAlign.shift = np.concatenate(shift_cpu, axis=1) # Shifting. 
( - TomoAlign.prjs, - TomoAlign.sx, - TomoAlign.sy, - TomoAlign.shift, + RunAlign.prjs, + RunAlign.sx, + RunAlign.sy, + RunAlign.shift, err, - TomoAlign.pad_ds, + RunAlign.pad_ds, center, ) = shift_prj_update_shift_cp( - TomoAlign.prjs, - TomoAlign.sx, - TomoAlign.sy, - TomoAlign.shift, + RunAlign.prjs, + RunAlign.sx, + RunAlign.sy, + RunAlign.shift, num_batches, - TomoAlign.pad_ds, + RunAlign.pad_ds, center, - downsample_factor=TomoAlign.downsample_factor, - progress=TomoAlign.Align.progress_shifting, + downsample_factor=RunAlign.ds_factor, + progress=RunAlign.analysis_parent.progress_shifting, ) - TomoAlign.conv[n] = np.linalg.norm(err) + RunAlign.conv[n] = np.linalg.norm(err) # update images - image_projection.image = TomoAlign.prjs[projection_num] + image_projection.image = RunAlign.prjs[projection_num] image_simulated.image = sim[projection_num] # update plot lines line_conv.x = np.arange(0, n + 1) - line_conv.y = TomoAlign.conv[range(n + 1)] - line.y = [TomoAlign.sx, TomoAlign.sy] - TomoAlign.recon = np.concatenate(TomoAlign.recon, axis=0) + line_conv.y = RunAlign.conv[range(n + 1)] + line.y = [RunAlign.sx * RunAlign.ds_factor, RunAlign.sy * RunAlign.ds_factor] + RunAlign.recon = np.concatenate(RunAlign.recon, axis=0) mempool = cp.get_default_memory_pool() mempool.free_all_blocks() # Re-normalize data - TomoAlign.prjs *= scl - TomoAlign.recon = circ_mask(TomoAlign.recon, 0) + RunAlign.prjs *= scl + RunAlign.recon = circ_mask(RunAlign.recon, 0) if downsample: - TomoAlign.sx /= TomoAlign.downsample_factor - TomoAlign.sy /= TomoAlign.downsample_factor - TomoAlign.shift /= TomoAlign.downsample_factor + RunAlign.sx *= RunAlign.ds_factor + RunAlign.sy *= RunAlign.ds_factor + RunAlign.shift *= RunAlign.ds_factor - TomoAlign.pad = tuple([int(x / TomoAlign.downsample_factor) for x in TomoAlign.pad_ds]) - return TomoAlign + RunAlign.pad = tuple([int(x * RunAlign.ds_factor) for x in RunAlign.pad_ds]) + return RunAlign def simulate_projections(rec, sim, center, theta, 
progress=None): @@ -312,7 +334,12 @@ def simulate_projections(rec, sim, center, theta, progress=None): vol_geom = astra.create_vol_geom(_rec.shape[1], _rec.shape[1], _rec.shape[0]) phantom_id = astra.data3d.create("-vol", vol_geom, data=_rec) proj_geom = astra.create_proj_geom( - "parallel3d", 1, 1, _rec.shape[0], _rec.shape[1], theta, + "parallel3d", + 1, + 1, + _rec.shape[0], + _rec.shape[1], + theta, ) if center is not None: center_shift = -(center - _rec.shape[1] / 2) @@ -338,9 +365,12 @@ def batch_cross_correlation( rin=0.5, rout=0.8, subset_correlation=False, + subset_x=None, + subset_y=None, mask_sim=True, pad=(0, 0), progress=None, + median_filter=True, ): # TODO: the sign convention for shifting is bad here. # To fix this, change to @@ -358,21 +388,20 @@ def batch_cross_correlation( # simulated projections have data outside of the mask. if subset_correlation: _prj_gpu = cp.array( - _prj[batch][ - :, 2 * pad[1] : -2 * pad[1] : 1, 2 * pad[0] : -2 * pad[0] : 1 - ], + _prj[batch][:, subset_y[0] : subset_y[1], subset_x[0] : subset_x[1]], dtype=cp.float32, ) _sim_gpu = cp.array( - _sim[batch][ - :, 2 * pad[1] : -2 * pad[1] : 1, 2 * pad[0] : -2 * pad[0] : 1 - ], + _sim[batch][:, subset_y[0] : subset_y[1], subset_x[0] : subset_x[1]], dtype=cp.float32, ) else: _prj_gpu = cp.array(_prj[batch], dtype=cp.float32) _sim_gpu = cp.array(_sim[batch], dtype=cp.float32) + if median_filter: + _prj_gpu = ndi_cp.median_filter(_prj_gpu, size=(1, 5, 5)) + _sim_gpu = ndi_cp.median_filter(_sim_gpu, size=(1, 5, 5)) if mask_sim: _sim_gpu = cp.where(_prj_gpu < 1e-7, 0, _sim_gpu) @@ -384,7 +413,10 @@ def batch_cross_correlation( # In the warping section, we have to now warp prj by (-50, 0), so the # SAME sign of the shift value given here. 
shift_gpu = phase_cross_correlation( - _sim_gpu, _prj_gpu, upsample_factor=upsample_factor, return_error=False, + _sim_gpu, + _prj_gpu, + upsample_factor=upsample_factor, + return_error=False, ) shift_cpu.append(cp.asnumpy(shift_gpu)) if progress is not None: @@ -429,13 +461,14 @@ def blur_edges_cp(prj, low=0, high=0.8): return prj_gpu -def shift_prj_cp(prj, sx, sy, num_batches, pad, use_corr_prj_gpu=False): +def shift_prj_cp( + prj, sx, sy, num_batches, pad, use_pad_cond=True, use_corr_prj_gpu=False +): # add checks for sx, sy having the same dimension as prj prj_cpu = np.array_split(prj, num_batches, axis=0) _sx = np.array_split(sx, num_batches, axis=0) _sy = np.array_split(sy, num_batches, axis=0) for batch in range(len(prj_cpu)): - # for batch in tnrange(len(prj_cpu), desc="Shifting", leave=True): _prj_gpu = cp.array(prj_cpu[batch], dtype=cp.float32) num_theta = _prj_gpu.shape[0] shift_y_condition = pad[1] @@ -445,9 +478,13 @@ def shift_prj_cp(prj, sx, sy, num_batches, pad, use_corr_prj_gpu=False): if ( np.absolute(_sx[batch][image]) < shift_x_condition and np.absolute(_sy[batch][image]) < shift_y_condition + and use_pad_cond ): shift_tuple = (_sy[batch][image], _sx[batch][image]) _prj_gpu[image] = ndi_cp.shift(_prj_gpu[image], shift_tuple, order=5) + elif not use_pad_cond: + shift_tuple = (_sy[batch][image], _sx[batch][image]) + _prj_gpu[image] = ndi_cp.shift(_prj_gpu[image], shift_tuple, order=5) prj_cpu[batch] = cp.asnumpy(_prj_gpu) prj_cpu = np.concatenate(prj_cpu, axis=0) @@ -463,7 +500,7 @@ def shift_prj_update_shift_cp( pad, center, downsample_factor=1, - smart_shift=True, + smart_shift=False, smart_pad=True, progress=None, ): @@ -473,7 +510,7 @@ def shift_prj_update_shift_cp( # TODO: add checks for sx, sy having the same dimension as prj # # If the shift starts to get larger than the padding in one direction, - # shift it to the center of the sx values. This should help to avoid + # shift it to the center of the sx values. 
average_sx = None average_sy = None if smart_shift: diff --git a/tomopyui/tomocupy/prep/sampling.py b/tomopyui/tomocupy/prep/sampling.py new file mode 100644 index 0000000..196bbcb --- /dev/null +++ b/tomopyui/tomocupy/prep/sampling.py @@ -0,0 +1,53 @@ +import cupy as cp +import numpy as np +from cupyx.scipy import ndimage as ndi_cp + + +def shrink_projections(images, high_energy, low_energy, num_batches, order=3): + shrink_ratio = low_energy / high_energy + _images = np.array_split(images, num_batches, axis=0) + zoomed_image_cpu = [] + for batch in _images: + batch_gpu = cp.array(batch, dtype=cp.float32) + zoomed_image_gpu = ndi_cp.zoom( + batch_gpu, (1, shrink_ratio, shrink_ratio), order=order + ) + zoomed_image_cpu.append(cp.asnumpy(zoomed_image_gpu)) + + zoomed_image_cpu = np.concatenate(zoomed_image_cpu, axis=0) + mempool = cp.get_default_memory_pool() + mempool.free_all_blocks() + return zoomed_image_cpu + + +def shrink_and_pad_projections( + images_low, images_high, low_energy, high_energy, num_batches, order=3 +): + shrink_ratio = low_energy / high_energy + _images = np.array_split(images_low, num_batches, axis=0) + zoomed_image_cpu = [] + ref_shape = images_high.shape + for batch in _images: + batch_gpu = cp.array(batch, dtype=cp.float32) + zoomed_image_gpu = ndi_cp.zoom( + batch_gpu, (1, shrink_ratio, shrink_ratio), order=order + ) + zoomed_image_gpu = pad_to_make_same_size_cp(batch_gpu, ref_shape) + zoomed_image_cpu.append(cp.asnumpy(zoomed_image_gpu)) + + zoomed_image_cpu = np.concatenate(zoomed_image_cpu, axis=0) + mempool = cp.get_default_memory_pool() + mempool.free_all_blocks() + return zoomed_image_cpu + + +def pad_to_make_same_size_cp(images_to_pad, ref_shape): + to_pad_shape = images_to_pad.shape + diffshape = [y - x for x, y in zip(to_pad_shape, ref_shape)] + diffshape[0] = 0 + diffshape = [ + [x / 2, x / 2] if x % 2 == 0 else [x / 2 + 0.5, x / 2 - 0.5] for x in diffshape + ] + pad = tuple([(int(x[0]), int(x[1])) for x in diffshape]) + 
images_padded = cp.pad(images_to_pad, pad) + return images_padded diff --git a/tomopyui/tomocupy/recon/algorithm.py b/tomopyui/tomocupy/recon/algorithm.py index e30bf50..1efa828 100644 --- a/tomopyui/tomocupy/recon/algorithm.py +++ b/tomopyui/tomocupy/recon/algorithm.py @@ -1,7 +1,6 @@ -#!/usr/bin/env python +import astra from tomopy.recon import algorithm as tomopy_algorithm -import astra def recon_sirt_plugin(prj, angles, num_iter=1, rec=None, center=None): diff --git a/tomopyui/widgets/Untitled.ipynb b/tomopyui/widgets/Untitled.ipynb deleted file mode 100644 index 2ddf5b5..0000000 --- a/tomopyui/widgets/Untitled.ipynb +++ /dev/null @@ -1,83 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "a63e287f-c3f3-49c4-9b14-0e2a216ae785", - "metadata": {}, - "outputs": [], - "source": [ - "import ipyvolume" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "3841b2cd-d0a4-4ecf-ae64-c0d12d50820a", - "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "8366862c50a04dbdb76faff60e54b5f5", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "VBox(children=(Figure(camera=PerspectiveCamera(aspect=0.8, fov=45.0, matrixWorldNeedsUpdate=True, position=(2.…" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "0796406c00a747afa4d88216a701ced7", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Scatter(color_selected=array('white', dtype='" - ) - - # -- Button to load metadata ---------------------------------------------- - obj.load_metadata_button = Button( - description="Click to load metadata.", - icon="upload", - disabled=True, - button_style="info", # 'success', 'info', 'warning', 'danger' or '' - tooltip="First choose a metadata file in the Import tab, then click here", - layout=Layout(width="auto", justify_content="center"), - ) - - # -- Plotting 
------------------------------------------------------------- - obj.plot_prj_images_button = Button( - description="Click to plot projection images.", - disabled=False, - button_style="info", - tooltip="Plot the prj images to be loaded into alignment.", - icon="", - layout=Layout(width="auto", justify_content="center"), - ) - - obj.plotting_vbox = VBox( - [obj.plot_prj_images_button, VBox([]), obj.prj_plotter.set_range_button,], - layout=Layout(align_content="center"), - ) - - obj.plotter_accordion = Accordion( - children=[obj.plotting_vbox], - selected_index=None, - titles=("Plot Projection Images",), - ) - - # -- Saving Options ------------------------------------------------------- - obj.save_opts = {key: False for key in obj.save_opts_list} - obj.save_opts_checkboxes = helpers.create_checkboxes_from_opt_list( - obj.save_opts_list, obj.save_opts, obj - ) - - # -- Method Options ------------------------------------------------------- - obj.methods_opts = { - key: False for key in obj.tomopy_methods_list + obj.astra_cuda_methods_list - } - obj.tomopy_methods_checkboxes = helpers.create_checkboxes_from_opt_list( - obj.tomopy_methods_list, obj.methods_opts, obj - ) - obj.astra_cuda_methods_checkboxes = helpers.create_checkboxes_from_opt_list( - obj.astra_cuda_methods_list, obj.methods_opts, obj - ) - - # -- Projection Range Sliders --------------------------------------------- - # Sliders are defined from the plotter. Probably a better way to go about - # this. 
- - obj.prj_range_x_slider = obj.prj_plotter.prj_range_x_slider - link_range_x = link( - (obj.prj_range_x_slider, "value"), (obj.prj_plotter.prj_range_x_slider, "value") - ) - obj.prj_range_y_slider = obj.prj_plotter.prj_range_y_slider - link_range_y = link( - (obj.prj_range_y_slider, "value"), (obj.prj_plotter.prj_range_y_slider, "value") - ) - - # -- Options ---------------------------------------------------------- - - # Number of iterations - obj.num_iterations_textbox = IntText( - description="Number of Iterations: ", - style=extend_description_style, - value=obj.num_iter, - ) - - # Center - obj.center_textbox = FloatText( - description="Center of Rotation: ", - style=extend_description_style, - value=obj.center, - ) - center_link = link( - (obj.center_textbox, "value"), (obj.Center.center_textbox, "value") - ) - - # Downsampling - obj.downsample_checkbox = Checkbox(description="Downsample?", value=False) - obj.downsample_factor_textbox = BoundedFloatText( - value=obj.downsample_factor, - min=0.001, - max=1.0, - description="Downsample factor:", - disabled=True, - style=extend_description_style, - ) - - # Batch size - obj.num_batches_textbox = IntText( - description="Number of batches (for GPU): ", - style=extend_description_style, - value=obj.num_batches, - ) - - # X Padding - obj.paddingX_textbox = IntText( - description="Padding X (px): ", - style=extend_description_style, - value=obj.paddingX, - ) - - # Y Padding - obj.paddingY_textbox = IntText( - description="Padding Y (px): ", - style=extend_description_style, - value=obj.paddingY, - ) - # Extra options - obj.extra_options_textbox = Text( - description="Extra options: ", - placeholder='{"MinConstraint": 0}', - style=extend_description_style, - ) - - # -- Object-specific widgets ---------------------------------------------- - - if obj.widget_type == "Align": - - # -- Description of turn-on radio ------------------------------------- - obj.radio_description = "Would you like to align this dataset?" 
- obj.radio_description = HTML( - value="

" - + obj.radio_description - + "

" - ) - # -- Progress bars and plotting output -------------------------------- - obj.progress_total = IntProgress(description="Recon: ", value=0, min=0, max=1) - obj.progress_reprj = IntProgress(description="Reproj: ", value=0, min=0, max=1) - obj.progress_phase_cross_corr = IntProgress( - description="Phase Corr: ", value=0, min=0, max=1 - ) - obj.progress_shifting = IntProgress( - description="Shifting: ", value=0, min=0, max=1 - ) - obj.plot_output1 = Output() - obj.plot_output2 = Output() - - # -- Button to start alignment ---------------------------------------- - obj.start_button = Button( - description="After choosing all of the options above, click this button to start the alignment.", - disabled=True, - button_style="info", # 'success', 'info', 'warning', 'danger' or '' - tooltip="Start alignment with this button.", - icon="", - layout=Layout(width="auto", justify_content="center"), - ) - # -- Upsample factor -------------------------------------------------- - obj.upsample_factor_textbox = FloatText( - description="Upsample Factor: ", - style=extend_description_style, - value=obj.upsample_factor, - ) - - elif obj.widget_type == "Recon": - - # -- Description of turn-on radio ------------------------------------- - obj.radio_description = "Would you like to reconstruct this dataset?" - obj.radio_description = HTML( - value="

" - + obj.radio_description - + "

" - ) - # -- Button to start reconstruction ----------------------------------- - obj.start_button = Button( - description="After choosing all of the options above, click this button to start the reconstruction.", - disabled=True, - button_style="info", # 'success', 'info', 'warning', 'danger' or '' - tooltip="Start reconstruction with this button.", - icon="", - layout=Layout(width="auto", justify_content="center"), - ) - - -def _set_widgets_from_load_metadata(obj): - - # -- Radio to turn on partial dataset ------------------------------------- - if obj.partial: - obj.radio_fulldataset.value = "Partial" - else: - obj.radio_fulldataset.value = "Full" - - # -- Saving Options ------------------------------------------------------- - obj.save_opts_checkboxes = helpers.set_checkbox_bool( - obj.save_opts_checkboxes, obj.metadata["save_opts"], obj - ) - - # -- Method Options ------------------------------------------------------- - # for key in obj.metadata["methods"]: - # if obj.metadata["methods"][key]: - # for checkbox in obj.methods_checkboxes: - # if checkbox.description == str(key): - # checkbox.value = True - # elif not obj.metadata["methods"][key]: - # for checkbox in obj.methods_checkboxes: - # if checkbox.description == str(key): - # checkbox.value = False - obj.methods_checkboxes = helpers.set_checkbox_bool( - obj.methods_checkboxes, obj.metadata["methods"], obj - ) - - # -- Projection Range Sliders --------------------------------------------- - # Not implemented in load metadata. 
- - # -- Options ---------------------------------------------------------- - - # Number of iterations - obj.num_iterations_textbox.value = obj.num_iter - - # Center - obj.center_textbox.value = obj.center - - # Downsampling - obj.downsample_checkbox.value = obj.downsample - obj.downsample_factor_textbox.value = obj.downsample_factor - if obj.downsample_checkbox.value: - obj.downsample_factor_textbox.disabled = False - - # Batch size - obj.num_batches_textbox.value = obj.num_batches - - # X Padding - obj.paddingX_textbox.value = obj.paddingX - - # Y Padding - obj.paddingY_textbox.value = obj.paddingY - - # Extra options - obj.extra_options_textbox.value = str(obj.extra_options) - - # -- Object-specific widgets ---------------------------------------------- - - if obj.widget_type == "Align": - - # -- Upsample factor -------------------------------------------------- - obj.upsample_factor_textbox.value = obj.upsample_factor - - return obj diff --git a/tomopyui/widgets/_shared/archive/import_data_old.py b/tomopyui/widgets/_shared/archive/import_data_old.py deleted file mode 100644 index 274f27d..0000000 --- a/tomopyui/widgets/_shared/archive/import_data_old.py +++ /dev/null @@ -1,335 +0,0 @@ -from ipywidgets import * -from ipyfilechooser import FileChooser -import functools -import tomopy.data.tomodata as td -import pdb - -default_generalmetadata = {"analysis_date": "20000101"} -default_importmetadata = dict(tomo={}, flat={}, dark={}) - - -def create_import_box( - importmetadata=default_importmetadata, generalmetadata=default_generalmetadata -): - - cwd = generalmetadata["starting_wd"] - main_logger = generalmetadata["main_logger"] - main_handler = generalmetadata["main_handler"] - - extend_description_style = {"description_width": "auto"} - radio_drive_import = RadioButtons( - options=["C:", "Z:"], - description="Choose the drive your data is on:", - style=extend_description_style, - ) - - def update_drive(change): - if change.new == 1: - tomofc.reset(path="Z:/") - 
darkfc.reset(path="Z:/") - flatfc.reset(path="Z:/") - elif change.new == 0: - tomofc.reset(path="C:/") - darkfc.reset(path="C:/") - flatfc.reset(path="C:/") - - # make sure the file choosers are going to the correct directory. - radio_drive_import.observe(update_drive, names="index") - - # File choosers for each type of data - tomofc = FileChooser(path=cwd) - darkfc = FileChooser(path=cwd) - flatfc = FileChooser(path=cwd) - - # defining importmetadata callbacks. You should select tomo first to define the file path. - def update_tomofname(self): - importmetadata["tomo"]["fpath"] = self.selected_path - importmetadata["tomo"]["fname"] = self.selected_filename - darkfc.reset(path=self.selected_path) - flatfc.reset(path=self.selected_path) - - def update_flatfname(self): - importmetadata["flat"]["fpath"] = self.selected_path - importmetadata["flat"]["fname"] = self.selected_filename - - def update_darkfname(self): - importmetadata["dark"]["fpath"] = self.selected_path - importmetadata["dark"]["fname"] = self.selected_filename - - tomofc.register_callback(update_tomofname) - flatfc.register_callback(update_flatfname) - darkfc.register_callback(update_darkfname) - - def update_datatype(self): - if self["owner"].description == "Tomo Image Type:": - importmetadata["tomo"]["imgtype"] = self["new"] - if self["owner"].description == "Flat Image Type:": - importmetadata["flat"]["imgtype"] = self["new"] - if self["owner"].description == "Dark Image Type:": - importmetadata["dark"]["imgtype"] = self["new"] - - radio_import_options = ["tiff", "tiff folder", "h5", "one image"] - - def create_filetype_radio( - description, options=radio_import_options, value="tiff", disabled=False - ): - radio = RadioButtons( - options=options, - value=value, - description=description, - disabled=disabled, - style={"description_width": "auto"}, - ) - return radio - - # create radio buttons for image type - tomo_radio = create_filetype_radio("Tomo Image Type:") - flat_radio = 
create_filetype_radio("Flat Image Type:") - dark_radio = create_filetype_radio("Dark Image Type:") - - # make radio buttons do something - tomo_radio.observe(update_datatype, names="value") - flat_radio.observe(update_datatype, names="value") - dark_radio.observe(update_datatype, names="value") - - # initialize metadata. probably can find a way to do this in the create_filetype_radio function. - importmetadata["tomo"]["imgtype"] = "tiff" - importmetadata["flat"]["imgtype"] = "tiff" - importmetadata["dark"]["imgtype"] = "tiff" - - for key in importmetadata: - importmetadata[key]["imgtype"] = "tiff" - importmetadata[key]["opts"] = {} - importmetadata[key]["fpath"] = None - importmetadata[key]["fname"] = None - - def create_option_dictionary(opt_list): - opt_dictionary = {opt.description: opt.value for opt in opt_list} - return opt_dictionary - - def create_dict_on_checkmark_import(change, opt_list, dictname): - importmetadata[dictname]["opts"] = create_option_dictionary(opt_list) - - # create checkboxes for other import options - def create_import_option_checkbox(description, disabled=False, value=0): - checkbox = Checkbox(description=description, disabled=disabled, value=value) - return checkbox - - # create other import options - other_import_options = ["downsample", "rotate", "jawn1", "jawn2"] - for key in importmetadata: - importmetadata[key]["opts"] = {opt: False for opt in other_import_options} - - tomo_import_other_options = [] - flat_import_other_options = [] - dark_import_other_options = [] - for key in importmetadata: - if key == "tomo": - for opt in other_import_options: - tomo_import_other_options.append(create_import_option_checkbox(opt)) - # make them clickable, creates dictionary when clicked - [ - opt.observe( - functools.partial( - create_dict_on_checkmark_import, - opt_list=tomo_import_other_options, - dictname=key, - ), - names=["value"], - ) - for opt in tomo_import_other_options - ] - if key == "flat": - for opt in other_import_options: - 
flat_import_other_options.append(create_import_option_checkbox(opt)) - # make them clickable, creates dictionary when clicked - [ - opt.observe( - functools.partial( - create_dict_on_checkmark_import, - opt_list=flat_import_other_options, - dictname=key, - ), - names=["value"], - ) - for opt in flat_import_other_options - ] - if key == "dark": - for opt in other_import_options: - dark_import_other_options.append(create_import_option_checkbox(opt)) - # make them clickable, creates dictionary when clicked - [ - opt.observe( - functools.partial( - create_dict_on_checkmark_import, - opt_list=dark_import_other_options, - dictname=key, - ), - names=["value"], - ) - for opt in dark_import_other_options - ] - - # function to create grid of checkboxes - def assign_checkbox_to_grid(optlist, grid_size_horiz=2, grid_size_vert=2): - _optlist = optlist - grid = GridspecLayout(grid_size_vert, grid_size_horiz) - grid[0, 0] = _optlist[0] - grid[0, 1] = _optlist[1] - grid[1, 0] = _optlist[2] - grid[1, 1] = _optlist[3] - return grid - - # create grid of checkboxes - tomo_import_other_options = assign_checkbox_to_grid(tomo_import_other_options) - flat_import_other_options = assign_checkbox_to_grid(flat_import_other_options) - dark_import_other_options = assign_checkbox_to_grid(dark_import_other_options) - - # create upload button - def parse_upload_type(metadata, datadict): - for key in metadata: - if metadata[key]["fpath"] is not None: - datadict[key] = td.TomoData(metadata=metadata[key]) - - return datadict - - def upload_data_on_click(self): - if self.button_style == "success" or isinstance("tomo_norm_mlog", td.TomoData): - self.button_style = "warning" - self.description = "It seems you already uploaded your data. Upload again?" - self.icon = "exclamation-triangle" - elif self.icon == "question": - self.button_style = "" - self.icon = "" - self.description = "Press this button to upload data into memory." 
- self.tooltip = "Upload your datasets (tomo, dark, and flat chosen above)" - elif self.button_style == "" or self.button_style == "warning": - self.button_style = "info" - self.icon = "fas fa-cog fa-spin fa-lg" - self.description = "Uploading data." - try: - datadict = {} - importmetadata["tomo"]["start_angle"] = angle_start_textbox.value - importmetadata["tomo"]["end_angle"] = angle_end_textbox.value - importmetadata["tomo"][ - "num_theta" - ] = number_of_projections_textbox.value - datadict = parse_upload_type(importmetadata, datadict) - self.button_style = "success" - self.description = "Upload complete." - self.icon = "fa-check-square" - if "flat" in datadict and "dark" in datadict: - tomo_norm, importmetadata["tomo_norm_mlog"] = td.normalize( - datadict["tomo"], datadict["flat"], datadict["dark"] - ) - main_logger.info("Normalized the data.") - else: - importmetadata["tomo_norm_mlog"] = datadict["tomo"] - main_logger.info( - "Darks and flats have not been uploaded into memory. Assuming your data is already normalized and -log." - ) - except: - self.icon = "question" - self.description = r"That didn't work. Sure you chose the correct files/formats? Click again to reset." 
- self.button_style = "warning" - - upload_data_button = Button( - description="Press this button to upload data into memory.", - disabled=False, - button_style="", # 'success', 'info', 'warning', 'danger' or '' - tooltip="Upload your datasets (tomo, dark, and flat chosen above)", - layout=Layout(width="auto"), - ) - upload_data_button.on_click(upload_data_on_click) - - # create container for all functions above - raw_import_box_layout = Layout( - border="3px solid blue", - width="90%", - align_items="center", - justify_content="center", - padding="50px", - ) - inner_box_layout = Layout( - border="2px solid green", - width="100%", - align_items="stretch", - margin="10px 0px 10px 0px", - ) - - tomo_import_hb = HBox( - [ - Label(value="Tomo data", layout=Layout(width="100px")), - tomofc, - tomo_radio, - tomo_import_other_options, - ], - layout=inner_box_layout, - ) - flat_import_hb = HBox( - [ - Label(value="Flat data", layout=Layout(width="100px")), - flatfc, - flat_radio, - flat_import_other_options, - ], - layout=inner_box_layout, - ) - dark_import_hb = HBox( - [ - Label(value="Dark data", layout=Layout(width="100px")), - darkfc, - dark_radio, - dark_import_other_options, - ], - layout=inner_box_layout, - ) - - text_description_style = {"description_width": "auto"} - - angle_start_textbox = FloatText( - value=-90, - description="Starting angle (\u00b0):", - disabled=False, - style=text_description_style, - ) - - angle_end_textbox = FloatText( - value=89.5, - description="Ending angle (\u00b0):", - disabled=False, - style=text_description_style, - ) - - number_of_projections_textbox = IntText( - value=360, - description="Number of Images", - disabled=False, - style=text_description_style, - ) - - angles_hb = HBox( - [ - radio_drive_import, - angle_start_textbox, - angle_end_textbox, - number_of_projections_textbox, - ] - ) - - raw_data_import_box = VBox( - children=[ - angles_hb, - tomo_import_hb, - flat_import_hb, - dark_import_hb, - upload_data_button, - ], - 
layout=raw_import_box_layout, - ) - - return ( - generalmetadata, - importmetadata, - raw_data_import_box, - ) diff --git a/tomopyui/widgets/_shared/archive/increase_global_fontsize.py b/tomopyui/widgets/_shared/archive/increase_global_fontsize.py deleted file mode 100644 index 6330ac9..0000000 --- a/tomopyui/widgets/_shared/archive/increase_global_fontsize.py +++ /dev/null @@ -1,10 +0,0 @@ - -%%html - \ No newline at end of file diff --git a/tomopyui/widgets/_shared/archive/plot_projections.py b/tomopyui/widgets/_shared/archive/plot_projections.py deleted file mode 100644 index 7fdcb34..0000000 --- a/tomopyui/widgets/_shared/archive/plot_projections.py +++ /dev/null @@ -1,274 +0,0 @@ -from ipywidgets import * -from ipyfilechooser import FileChooser -from skimage.transform import rescale -from matplotlib import pyplot as plt -from matplotlib import animation -from IPython.display import HTML -from tomopy.widgets.file_chooser_recon import file_chooser_recon -import tomopy.data.tomodata as td - - -def plot_projections( - reconmetadata, - alignmentmetadata, - importmetadata, - generalmetadata, - alignmentdata, - widget_linker, -): - # Importing file chooser box - recon_files, uploaders, opts_for_uploaders, angles_hb = file_chooser_recon( - reconmetadata, generalmetadata - ) - widget_linker["recon_files"] = recon_files - - # Initialize sliders, etc. 
- extend_description_style = {"description_width": "auto"} - skip_theta = IntSlider() - projection_range_x = IntRangeSlider( - description="Projection X Range:", - layout=Layout(width="70%"), - style=extend_description_style, - ) - projection_range_y = IntRangeSlider( - description="Projection Y Range:", - layout=Layout(width="70%"), - style=extend_description_style, - ) - projection_range_theta = IntRangeSlider( - description="Projection Theta Range:", - layout=Layout(width="70%"), - style=extend_description_style, - ) - - def load_data_for_plot(self): - - projection_range_x.description = "Projection X Range:" - projection_range_y.description = "Projection Y Range:" - projection_range_theta.description = "Projection Theta Range:" - skip_theta.description = "Skip theta:" - projection_range_x.max = current_tomo.prj_imgs.shape[2] - 1 - projection_range_y.max = current_tomo.prj_imgs.shape[1] - 1 - projection_range_theta.max = current_tomo.prj_imgs.shape[0] - 1 - projection_range_x.value = [0, current_tomo.prj_imgs.shape[2] - 1] - projection_range_y.value = [0, current_tomo.prj_imgs.shape[1] - 1] - projection_range_theta.value = [0, current_tomo.prj_imgs.shape[0] - 1] - - projection_range_x.min = 0 - projection_range_x.step = 1 - projection_range_x.disabled = False - projection_range_x.continuous_update = False - projection_range_x.orientation = "horizontal" - projection_range_x.readout = True - projection_range_x.readout_format = "d" - projection_range_x.layout = Layout(width="70%") - projection_range_x.style = extend_description_style - - projection_range_y.min = 0 - projection_range_y.step = 1 - projection_range_y.disabled = False - projection_range_y.continuous_update = False - projection_range_y.orientation = "horizontal" - projection_range_y.readout = True - projection_range_y.readout_format = "d" - projection_range_y.layout = Layout(width="70%") - projection_range_y.style = extend_description_style - - projection_range_theta.min = 0 - projection_range_theta.step 
= 1 - projection_range_theta.disabled = False - projection_range_theta.continuous_update = False - projection_range_theta.orientation = "horizontal" - projection_range_theta.readout = True - projection_range_theta.readout_format = "d" - projection_range_theta.layout = Layout(width="70%") - projection_range_theta.style = extend_description_style - - skip_theta.value = 20 - skip_theta.min = 1 - skip_theta.max = 50 - skip_theta.step = 1 - skip_theta.disabled = False - skip_theta.continuous_update = False - skip_theta.orientation = "horizontal" - skip_theta.readout = True - skip_theta.readout_format = "d" - skip_theta.layout = Layout(width="70%") - skip_theta.style = extend_description_style - - load_data_button = Button( - description="Click to load the selected data.", layout=Layout(width="auto") - ) - load_data_button.on_click(load_data_for_plot) - - # Radio for use of raw/normalized data, or normalized + aligned. - - def create_tomo_from_fc(dropdown_choice): - tomo = td.TomoData(metadata=reconmetadata["tomo"]["tomo_0"]) - for key in reconmetadata["tomo"]: - print(key) - if "fname" in reconmetadata["tomo"][key]: - print(reconmetadata["tomo"][key]["fname"]) - if reconmetadata["tomo"][key]["fname"] == dropdown_choice: - tomo = td.TomoData(metadata=reconmetadata["tomo"][key]) - print(tomo.prj_imgs) - return tomo - - def define_tomo_dropdown_options(alignmentdata, uploaders): - aligned_dropdown_options = [] - uploaded_dropdown_options = [] - num_uploaders = len(uploaders) - if uploaders is not None: - active_tomo_fc_list = [ - uploaders[i] - for i in range(num_uploaders) - if uploaders[i].selected_path is not None - ] - uploaded_dropdown_options = [ - active_tomo_fc_list[i].selected_filename - for i in range(len(active_tomo_fc_list)) - ] - if alignmentdata is not None: - aligned_tomo_dropdown_options = [ - f"alignment_{i}" for i in range(len(alignmentdata)) - ] - tomo_dropdown.options = ( - ["..."] + uploaded_dropdown_options + aligned_dropdown_options - ) - 
tomo_dropdown.value = "..." - - def upload_current_tomo(self): - global current_tomo - if self["new"].__contains__("alignment_"): - alignment_number = int(filter(str.isdigit, self["new"])) - current_tomo = alignmentdata[alignment_number].tomo - else: - print(self["new"]) - current_tomo = create_tomo_from_fc(self["new"]) - print(current_tomo.prj_imgs) - - tomo_dropdown = Dropdown( - options=["...", "Upload data..."], - value="...", - description="Normalized Tomo:", - disabled=False, - style=extend_description_style, - ) - - define_tomo_dropdown_options(alignmentdata, uploaders) - tomo_dropdown.observe(upload_current_tomo, names="value") - - def update_dropdownmenu(self): - key = self.title - reconmetadata["tomo"][key]["fpath"] = self.selected_path - reconmetadata["tomo"][key]["fname"] = self.selected_filename - define_tomo_dropdown_options(alignmentdata, uploaders) - - uploader_no = 10 - for i in range(uploader_no): - uploaders[i].register_callback(update_dropdownmenu) - - plot_output = Output() - movie_output = Output() - - def plot_projection_movie(tomodata, range_x, range_y, range_z, skip, scale_factor): - - frames = [] - animSliceNos = range(range_z[0], range_z[1], skip) - volume = tomodata.prj_imgs[ - range_z[0] : range_z[1] : skip, - range_y[0] : range_y[1] : 1, - range_x[0] : range_x[1] : 1, - ] - volume_rescaled = rescale( - volume, (1, scale_factor, scale_factor), anti_aliasing=False - ) - fig, ax = plt.subplots(figsize=(10, 10)) - for i in range(len(animSliceNos)): - frames.append([ax.imshow(volume_rescaled[i], cmap="viridis")]) - ani = animation.ArtistAnimation( - fig, frames, interval=50, blit=True, repeat_delay=100 - ) - # plt.close() - display(HTML(ani.to_jshtml())) - - def create_projection_movie_on_click(button_click): - movie_output.clear_output() - with movie_output: - create_movie_button.button_style = "info" - create_movie_button.icon = "fas fa-cog fa-spin fa-lg" - create_movie_button.description = "Making a movie." 
- plot_projection_movie( - current_tomo, - projection_range_x.value, - projection_range_y.value, - projection_range_theta.value, - skip_theta.value, - 0.1, - ) - create_movie_button.button_style = "success" - create_movie_button.icon = "square-check" - create_movie_button.description = "Do it again?" - - # Making a movie button - create_movie_button = Button( - description="Click me to create a movie", layout=Layout(width="auto") - ) - create_movie_button.on_click(create_projection_movie_on_click) - movie_output = Output() - movie_output.layout = Layout(width="100%", height="100%", align_items="center") - - grid_movie = GridBox( - children=[create_movie_button, movie_output], - layout=Layout( - width="100%", - grid_template_rows="auto", - grid_template_columns="15% 84%", - grid_template_areas=""" - "create_movie_button movie_output" - """, - ), - ) - - plot_box_layout = Layout( - border="3px solid blue", - width="100%", - height="auto", - align_items="center", - justify_content="center", - ) - - plot_vbox = VBox( - [ - tomo_dropdown, - load_data_button, - projection_range_x, - projection_range_y, - projection_range_theta, - skip_theta, - grid_movie, - ], - layout=plot_box_layout, - ) - - return plot_vbox, recon_files - - # widget_linker["projection_range_x_movie"] = projection_range_x - # widget_linker["projection_range_y_movie"] = projection_range_y - # widget_linker["projection_range_theta_movie"] = projection_range_theta - # widget_linker["skip_theta_movie"] = skip_theta - - -## USE LATER FOR RECONSTRUCTION PLOTTING: - -# if aligned_or_uploaded_radio.value == "Last Recon:": -# projection_range_x.description = "Recon X Range:" -# projection_range_y.description = "Recon Y Range:" -# projection_range_Z.description = "Recon Z Range:" -# skip_theta.description = "Skip z:" -# projection_range_x.max = current_tomo.shape[2] - 1 -# projection_range_x.value = [0, current_tomo.shape[2] - 1] -# projection_range_y.max = current_tomo.shape[1] - 1 -# projection_range_y.value = 
[0, current_tomo.shape[1] - 1] -# projection_range_theta.value = [0, current_tomo.shape[0] - 1] -# projection_range_theta.max = current_tomo.shape[0] - 1 diff --git a/tomopyui/widgets/_shared/archive/save_folder_chooser.py b/tomopyui/widgets/_shared/archive/save_folder_chooser.py deleted file mode 100644 index 3897746..0000000 --- a/tomopyui/widgets/_shared/archive/save_folder_chooser.py +++ /dev/null @@ -1,150 +0,0 @@ -from ipywidgets import * -from ipyfilechooser import FileChooser -import os -import shutil -import numpy as np - -# TODO: -# overwrite rather than delete - - -def save_file_location(importmetadata, generalmetadata, tomodata): - extend_description_style = {"description_width": "auto"} - radio_save_drive = RadioButtons( - options=["C:", "Z:"], - description="Choose the drive you want to save the data on:", - style=extend_description_style, - layout=Layout(width="80%"), - ) - - def update_save_drive(change): - if change.new == 1: - workingdirectoryparent.reset(path="Z:/") - elif change.new == 0: - workingdirectoryparent.reset(path="C:/") - - radio_save_drive.observe(update_save_drive, names="index") - - # File chooser for the parent of the chosen working directory. 
- if "fpath" not in importmetadata["tomo"]: - workingdirectoryparent = FileChooser(path=r"C:/", show_only_dirs=True) - else: - workingdirectoryparent = FileChooser( - path=importmetadata["tomo"]["fpath"], show_only_dirs=True - ) - - def update_wd(self): - generalmetadata["workingdirectoryparentpath"] = self.selected_path - - workingdirectoryparent.register_callback(update_wd) - - workingdirectoryname = Text( - value=(generalmetadata["analysis_date"] + "-analysis"), - placeholder="recon", - description="Working Directory Name:", - disabled=False, - style=extend_description_style, - ) - - def overwrite_button_on_click(self): - self.icon = "fa-cog" - os.chdir(workingdirectoryparent.selected_path) - try: - shutil.rmtree(workingdirectoryname.value) - self.icon = "fa-check-square" - self.description = ( - "Directory deleted. Click button above to make a new directory." - ) - self.button_style = "success" - except: - print("Unsuccessful directory removal") - - overwrite_button = Button( - description="", - disabled=True, - button_style="", - tooltip="", - layout=Layout(width="99%"), - ) - - mkdir_button = Button( - description="Make the directory above", - disabled=False, - button_style="", - tooltip="Make the directory.", - layout=Layout(width="99%"), - ) - - def mkdir_on_button_click(self): - os.chdir(workingdirectoryparent.selected_path) - if os.path.isdir(workingdirectoryname.value): - self.button_style = "warning" - self.description = "Directory already exists" - self.icon = "fa-exclamation-triangle" - overwrite_button.description = "Click to delete. Otherwise, rename above." 
- overwrite_button.disabled = False - overwrite_button.button_style = "danger" - overwrite_button.tooltip = "Warning: this will overwrite all data " - overwrite_button.icon = "question" - else: - self.button_style = "success" - self.description = "Directory Created" - self.icon = "fa-check-square" - save_tomo_data_button.disabled = False - save_tomo_data_button.description = ( - "Click to save normalized tomograms in your working directory." - ) - os.mkdir(workingdirectoryname.value) - generalmetadata["workingdirectoryname"] = workingdirectoryname.value - generalmetadata["workingdirectorypath"] = ( - generalmetadata["workingdirectoryparentpath"] - + "\\" - + workingdirectoryname.value - ) - - mkdir_button.on_click(mkdir_on_button_click) - overwrite_button.on_click(overwrite_button_on_click) - - recon_dashboard_layout = Layout( - border="3px solid blue", - width="50%", - height="auto", - align_items="center", - justify_content="center", - ) - - working_directory_parent_filechooser_hb = HBox( - [Label(value="Working Directory Parent: "), workingdirectoryparent] - ) - - def save_tomo_data_on_click(self): - os.chdir(generalmetadata["workingdirectorypath"]) - self.description = "Saving" - self.icon = "gear" - np.save("tomo_norm_mlog", tomodata.prj_imgs) - self.description = "Done." 
- self.icon = "fa-square-check" - self.button_style = "success" - - save_tomo_data_button = Button( - description="", - disabled=True, - button_style="info", - tooltip="Save raw tomodata in the working directory you just made.", - layout=Layout(width="99%"), - ) - save_tomo_data_button.on_click(save_tomo_data_on_click) - - save_data_box = VBox( - children=[ - radio_save_drive, - working_directory_parent_filechooser_hb, - workingdirectoryname, - mkdir_button, - overwrite_button, - save_tomo_data_button, - ], - layout=recon_dashboard_layout, - ) - - return save_data_box diff --git a/tomopyui/widgets/_shared/archive/upload_tab.py b/tomopyui/widgets/_shared/archive/upload_tab.py deleted file mode 100644 index 50add6a..0000000 --- a/tomopyui/widgets/_shared/archive/upload_tab.py +++ /dev/null @@ -1,128 +0,0 @@ -from ipyfilechooser import FileChooser -from ipywidgets import * -import functools - - -# will create one file uploader, given a specific working directory and a title -# for the uploader -def create_file_uploader(wd, title): - - return - - -def file_chooser_recon(reconmetadata): - extend_description_style = {"description_width": "auto"} - uploader_no = 10 - uploaders = [FileChooser(path=cwd, title=f"tomo_{i}") for i in range(uploader_no)] - - # def update_fnames(self): - # key = self.title - # reconmetadata["tomo"][key]["fpath"] = self.selected_path - # reconmetadata["tomo"][key]["fname"] = self.selected_filename - - reconmetadata["tomo"] = {f"tomo_{i}": {} for i in range(uploader_no)} - # for i in range(uploader_no): - # uploaders[i].register_callback(update_fnames) - - ############### Creating options checkboxes - def create_option_dictionary(opt_list): - opt_dictionary = {opt.description: opt.value for opt in opt_list} - return opt_dictionary - - def create_dict_on_checkmark_import(change, opt_list, dictname): - reconmetadata["tomo"][dictname]["opts"] = create_option_dictionary(opt_list) - - def create_import_option_checkbox(description, disabled=False, 
value=0): - checkbox = Checkbox(description=description, disabled=disabled, value=value) - return checkbox - - other_import_options = [ - "rotate", - ] - # this will be in the same order as uploaders - # (uploaders[i] matches opts_for_uploaders[i]) - opts_for_uploaders = [[] for n in range(uploader_no)] - i = 0 - for key in reconmetadata["tomo"]: - for opt in other_import_options: - opts_for_uploaders[i].append(create_import_option_checkbox(opt)) - # make them clickable, creates dictionary when clicked - [ - opt.observe( - functools.partial( - create_dict_on_checkmark_import, opt_list=[opt], dictname=key, - ), - names=["value"], - ) - for opt in opts_for_uploaders[i] - ] - i += 1 - - # Similar to above, we create angle start/end textboxes for all: - - def angle_callbacks(change, description, tomo_number): - reconmetadata["tomo"][f"tomo_{tomo_number}"][description] = change.new - - def create_angles_textboxes(tomo_number): - angle_start_textbox = FloatText( - value=-90, - description="Starting angle (\u00b0):", - disabled=False, - style=extend_description_style, - ) - - angle_end_textbox = FloatText( - value=89.5, - description="Ending angle (\u00b0):", - disabled=False, - style=extend_description_style, - ) - - # currently unused. 
automatically grabs num_theta from files - number_of_projections_textbox = IntText( - value=360, - description="Number of Images", - disabled=False, - style=extend_description_style, - ) - - reconmetadata["tomo"][f"tomo_{tomo_number}"][ - "start_angle" - ] = angle_start_textbox.value - reconmetadata["tomo"][f"tomo_{tomo_number}"][ - "end_angle" - ] = angle_end_textbox.value - - angle_start_textbox.observe( - functools.partial( - angle_callbacks, description="start_angle", tomo_number=tomo_number - ), - names="value", - ) - angle_end_textbox.observe( - functools.partial( - angle_callbacks, description="end_angle", tomo_number=tomo_number - ), - names="value", - ) - - angles_hbox = HBox( - [angle_start_textbox, angle_end_textbox, number_of_projections_textbox] - ) - - return angles_hbox - - angles_hb = [ - create_angles_textboxes(tomo_number) for tomo_number in range(len(uploaders)) - ] - - ####### Combining everything - uploaders_textboxes_checkboxes = [ - HBox([uploaders[i], angles_hb[i], *opts_for_uploaders[i]]) - for i in range(len(uploaders)) - ] - recon_files = VBox( - uploaders_textboxes_checkboxes, - layout=Layout(flex_flow="row wrap", width="100%"), - ) - return recon_files, uploaders, opts_for_uploaders, angles_hb diff --git a/tomopyui/widgets/_shared/archive/upload_tab_importmetadata.py b/tomopyui/widgets/_shared/archive/upload_tab_importmetadata.py deleted file mode 100644 index e7bb907..0000000 --- a/tomopyui/widgets/_shared/archive/upload_tab_importmetadata.py +++ /dev/null @@ -1,125 +0,0 @@ -from ipyfilechooser import FileChooser -from ipywidgets import * -import functools - - -# will create one file uploader, given a specific working directory and a title -# for the uploader - - -def file_chooser_recon(reconmetadata): - extend_description_style = {"description_width": "auto"} - uploader_no = 10 - uploaders = [FileChooser(path=cwd, title=f"tomo_{i}") for i in range(uploader_no)] - - # def update_fnames(self): - # key = self.title - # 
reconmetadata["tomo"][key]["fpath"] = self.selected_path - # reconmetadata["tomo"][key]["fname"] = self.selected_filename - - reconmetadata["tomo"] = {f"tomo_{i}": {} for i in range(uploader_no)} - # for i in range(uploader_no): - # uploaders[i].register_callback(update_fnames) - - ############### Creating options checkboxes - def create_option_dictionary(opt_list): - opt_dictionary = {opt.description: opt.value for opt in opt_list} - return opt_dictionary - - def create_dict_on_checkmark_import(change, opt_list, dictname): - reconmetadata["tomo"][dictname]["opts"] = create_option_dictionary(opt_list) - - def create_import_option_checkbox(description, disabled=False, value=0): - checkbox = Checkbox(description=description, disabled=disabled, value=value) - return checkbox - - other_import_options = [ - "rotate", - ] - # this will be in the same order as uploaders - # (uploaders[i] matches opts_for_uploaders[i]) - opts_for_uploaders = [[] for n in range(uploader_no)] - i = 0 - for key in reconmetadata["tomo"]: - for opt in other_import_options: - opts_for_uploaders[i].append(create_import_option_checkbox(opt)) - # make them clickable, creates dictionary when clicked - [ - opt.observe( - functools.partial( - create_dict_on_checkmark_import, opt_list=[opt], dictname=key, - ), - names=["value"], - ) - for opt in opts_for_uploaders[i] - ] - i += 1 - - # Similar to above, we create angle start/end textboxes for all: - - def angle_callbacks(change, description, tomo_number): - reconmetadata["tomo"][f"tomo_{tomo_number}"][description] = change.new - - def create_angles_textboxes(tomo_number): - angle_start_textbox = FloatText( - value=-90, - description="Starting angle (\u00b0):", - disabled=False, - style=extend_description_style, - ) - - angle_end_textbox = FloatText( - value=89.5, - description="Ending angle (\u00b0):", - disabled=False, - style=extend_description_style, - ) - - # currently unused. 
automatically grabs num_theta from files - number_of_projections_textbox = IntText( - value=360, - description="Number of Images", - disabled=False, - style=extend_description_style, - ) - - reconmetadata["tomo"][f"tomo_{tomo_number}"][ - "start_angle" - ] = angle_start_textbox.value - reconmetadata["tomo"][f"tomo_{tomo_number}"][ - "end_angle" - ] = angle_end_textbox.value - - angle_start_textbox.observe( - functools.partial( - angle_callbacks, description="start_angle", tomo_number=tomo_number - ), - names="value", - ) - angle_end_textbox.observe( - functools.partial( - angle_callbacks, description="end_angle", tomo_number=tomo_number - ), - names="value", - ) - - angles_hbox = HBox( - [angle_start_textbox, angle_end_textbox, number_of_projections_textbox] - ) - - return angles_hbox - - angles_hb = [ - create_angles_textboxes(tomo_number) for tomo_number in range(len(uploaders)) - ] - - ####### Combining everything - uploaders_textboxes_checkboxes = [ - HBox([uploaders[i], angles_hb[i], *opts_for_uploaders[i]]) - for i in range(len(uploaders)) - ] - recon_files = VBox( - uploaders_textboxes_checkboxes, - layout=Layout(flex_flow="row wrap", width="100%"), - ) - return recon_files, uploaders, opts_for_uploaders, angles_hb diff --git a/tomopyui/widgets/_shared/archive/user_information.py b/tomopyui/widgets/_shared/archive/user_information.py deleted file mode 100644 index e166c45..0000000 --- a/tomopyui/widgets/_shared/archive/user_information.py +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -from ipywidgets import * - - -def create_user_info_box(metadata): - extend_description_style = {"description_width": "auto"} - collection_date = DatePicker( - description="Date that you collected your data:", - style=extend_description_style, - disabled=False, - layout=Layout(width="99%", font=30), - ) - - analysis_date = DatePicker( - description="Date you are doing this analysis:", - style=extend_description_style, - disabled=False, - 
layout=Layout(width="99%"), - ) - - user_name = Text( - value="", - style=extend_description_style, - placeholder="X-ray Microscopist", - description="Your name:", - disabled=False, - layout=Layout(width="99%"), - ) - - user_institution = Text( - value="", - style=extend_description_style, - placeholder="Stanford Synchrotron Radiation Lightsource", - description="Your institution:", - disabled=False, - layout=Layout(width="99%"), - ) - - phone_number = Text( - value="", - style=extend_description_style, - placeholder="555-555-5555", - description="Your phone number (for texting you when finished with jobs):", - disabled=False, - layout=Layout(width="99%"), - ) - - phone_number = Text( - value="", - style=extend_description_style, - placeholder="555-555-5555", - description="Your phone number (for texting you when finished with jobs):", - disabled=False, - layout=Layout(width="99%"), - ) - - carrier = Dropdown( - value="Verizon", - options=["Verizon", "T-Mobile"], - style=extend_description_style, - placeholder="555-555-5555", - description="Your carrier:", - disabled=False, - layout=Layout(width="99%"), - ) - email = Text( - value="", - style=extend_description_style, - placeholder="user@slac.stanford.edu", - description="Your email:", - disabled=False, - layout=Layout(width="99%"), - ) - - def update_collection_date(change): - metadata["collection_date"] = change.new - metadata["collection_date"] = str(metadata["collection_date"]).replace("-", "") - - def update_analysis_date(change): - metadata["analysis_date"] = change.new - metadata["analysis_date"] = str(metadata["analysis_date"]).replace("-", "") - - def update_user_name(change): - metadata["user_name"] = change.new - - def update_user_institution(change): - metadata["user_institution"] = change.new - - def update_phone_number(change): - metadata["phone_number"] = change.new - - def update_email(change): - metadata["email"] = change.new - - def update_carrier(change): - metadata["carrier"] = change.new - - 
user_name.observe(update_analysis_date, names="value") - user_institution.observe(update_analysis_date, names="value") - collection_date.observe(update_collection_date, names="value") - analysis_date.observe(update_analysis_date, names="value") - phone_number.observe(update_phone_number, names="value") - carrier.observe(update_carrier, names="value") - email.observe(update_email, names="value") - - box_layout = Layout(border="3px solid green", width="50%", align_items="stretch") - infobox = VBox( - children=[ - user_name, - user_institution, - phone_number, - carrier, - email, - collection_date, - analysis_date, - ], - layout=box_layout, - ) - - return metadata, infobox diff --git a/tomopyui/widgets/_shared/helpers.py b/tomopyui/widgets/_shared/helpers.py deleted file mode 100644 index 3cca7ff..0000000 --- a/tomopyui/widgets/_shared/helpers.py +++ /dev/null @@ -1,201 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -from ipywidgets import * - -import os -import glob -import numpy as np -import json -import functools -import tifffile as tf -import asyncio -import logging -import ipywidgets as widgets -import importlib.util -import sys - - -def import_module_set_env(import_dict): - """ - From https://stackoverflow.com/questions/1051254/check-if-python-package-is-installed - - Safely imports a module or package and sets an environment variable if it - imports (or is already imported). This is used in the main function for - checking whether or not `cupy` is installed. If it is not installed, then - options for cuda-enabled functions will be greyed out. 
- """ - for key in import_dict: - if key in sys.modules: - os.environ[import_dict[key]] = "True" - pass - elif (spec := importlib.util.find_spec(key)) is not None: - module = importlib.util.module_from_spec(spec) - sys.modules[key] = module - spec.loader.exec_module(module) - os.environ[import_dict[key]] = "True" - else: - os.environ[import_dict[key]] = "False" - pass - - -# From ipywidgets readthedocs -class OutputWidgetHandler(logging.Handler): - """Custom logging handler sending logs to an output widget""" - - def __init__(self, *args, **kwargs): - super(OutputWidgetHandler, self).__init__(*args, **kwargs) - layout = {"width": "100%", "height": "160px", "border": "1px solid black"} - self.out = Output(layout=layout) - - def emit(self, record): - """Overload of logging.Handler method""" - formatted_record = self.format(record) - new_output = { - "name": "stdout", - "output_type": "stream", - "text": formatted_record + "\n", - } - self.out.outputs = (new_output,) + self.out.outputs - - def show_logs(self): - """Show the logs""" - display(self.out) - - def clear_logs(self): - """Clear the current logs""" - self.out.clear_output() - - -def return_handler(logger, logging_level=None): - handler = OutputWidgetHandler() - handler.setFormatter( - logging.Formatter("%(asctime)s - [%(levelname)s] %(message)s") - ) - # handler.show_logs() - logger.addHandler(handler) - logger.setLevel(logging_level) # log at info level. - return handler, logger - - -def get_img_shape(fpath, fname, ftype, metadata, folder_import=False): - """ """ - - os.chdir(fpath) - if ftype == "tiff": - tiff_count_in_folder = len(glob.glob1(fpath, "*.tif")) - # TODO: make this so it just reads the number of tiffs in the folder - # and the first tiff to avoid loading data into memory. 
- if folder_import: - _tomo = td.TomoData(metadata=metadata) - size = _tomo.prj_imgs.shape - sizeZ = size[0] - sizeY = size[1] - sizeX = size[2] - else: - with tf.TiffFile(fname) as tif: - # if you select a file instead of a file path, it will try to - # bring in the full folder - # this may cause issues if someone is trying to bring in a - # file in a folder with a lot of tiffs. can just make a note - # to do the analysis in a 'fresh' folder - if tiff_count_in_folder > 50: - sizeX = tif.pages[0].tags["ImageWidth"].value - sizeY = tif.pages[0].tags["ImageLength"].value - sizeZ = tiff_count_in_folder # can maybe use this later - else: - imagesize = tif.pages[0].tags["ImageDescription"] - size = json.loads(imagesize.value)["shape"] - sizeZ = size[0] - sizeY = size[1] - sizeX = size[2] - - elif ftype == "npy": - size = np.load(fname, mmap_mode="r").shape - sizeY = size[1] - sizeX = size[2] - - return (sizeZ, sizeY, sizeX) - - -class MetaCheckbox: - def __init__(self, description, dictionary, obj, disabled=False, value=False): - - self.checkbox = Checkbox( - description=description, value=value, disabled=disabled - ) - - def create_opt_dict_on_check(change): - dictionary[description] = change.new - obj.set_metadata() # obj needs a set_metadata function - - self.checkbox.observe(create_opt_dict_on_check, names="value") - - -def create_checkbox(description, disabled=False, value=False): - checkbox = Checkbox(description=description, disabled=disabled, value=value) - return checkbox - - -def create_checkboxes_from_opt_list(opt_list, dictionary, obj): - checkboxes = [MetaCheckbox(opt, dictionary, obj) for opt in opt_list] - return [a.checkbox for a in checkboxes] # return list of checkboxes - - -def set_checkbox_bool(checkbox_list, dictionary, obj): - def create_opt_dict_on_check(change): - dictionary[change.owner.description] = change.new - obj.set_metadata() # obj needs a set_metadata function - - for key in dictionary: - if dictionary[key]: - for checkbox in 
checkbox_list: - if checkbox.description == str(key): - checkbox.value = True - checkbox.observe(create_opt_dict_on_check, names="value") - elif not dictionary[key]: - for checkbox in checkbox_list: - if checkbox.description == str(key): - checkbox.value = False - checkbox.observe(create_opt_dict_on_check, names="value") - return checkbox_list - - -class Timer: - def __init__(self, timeout, callback): - self._timeout = timeout - self._callback = callback - - async def _job(self): - await asyncio.sleep(self._timeout) - self._callback() - - def start(self): - self._task = asyncio.ensure_future(self._job()) - - def cancel(self): - self._task.cancel() - - -def debounce(wait): - """Decorator that will postpone a function's - execution until after `wait` seconds - have elapsed since the last time it was invoked.""" - - def decorator(fn): - timer = None - - def debounced(*args, **kwargs): - nonlocal timer - - def call_it(): - fn(*args, **kwargs) - - if timer is not None: - timer.cancel() - timer = Timer(wait, call_it) - timer.start() - - return debounced - - return decorator diff --git a/tomopyui/widgets/_shared/output_handler.py b/tomopyui/widgets/_shared/output_handler.py deleted file mode 100644 index eedf729..0000000 --- a/tomopyui/widgets/_shared/output_handler.py +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -import logging -import ipywidgets as widgets - - -class OutputWidgetHandler(logging.Handler): - """Custom logging handler sending logs to an output widget""" - - def __init__(self, *args, **kwargs): - super(OutputWidgetHandler, self).__init__(*args, **kwargs) - layout = {"width": "100%", "height": "160px", "border": "1px solid black"} - self.out = widgets.Output(layout=layout) - - def emit(self, record): - """Overload of logging.Handler method""" - formatted_record = self.format(record) - new_output = { - "name": "stdout", - "output_type": "stream", - "text": formatted_record + "\n", - } - self.out.outputs = (new_output,) + 
self.out.outputs - - def show_logs(self): - """Show the logs""" - display(self.out) - - def clear_logs(self): - """Clear the current logs""" - self.out.clear_output() - - -def return_handler(logger, logging_level=None): - handler = OutputWidgetHandler() - handler.setFormatter( - logging.Formatter("%(asctime)s - [%(levelname)s] %(message)s") - ) - # handler.show_logs() - logger.addHandler(handler) - logger.setLevel(logging_level) # log at info level. - return handler, logger diff --git a/tomopyui/widgets/analysis.py b/tomopyui/widgets/analysis.py new file mode 100644 index 0000000..76b0793 --- /dev/null +++ b/tomopyui/widgets/analysis.py @@ -0,0 +1,804 @@ +import numpy as np +import copy +import pathlib + +from ipywidgets import * +from tomopyui._sharedvars import * +from abc import ABC, abstractmethod +from tomopyui.widgets.view import ( + BqImViewer_Projections_Parent, + BqImViewer_Projections_Child, + BqImViewer_Projections_Child, +) +from tomopyui.backend.runanalysis import RunAlign, RunRecon +from tomopyui.backend.io import ( + Projections_Child, + Metadata_Align, + Metadata_Recon, +) +from tomopyui.widgets.helpers import ( + ReactiveTextButton, + ReactiveIconButton, + SwitchOffOnIconButton, + ImportButton, +) + + +class AnalysisBase(ABC): + def init_attributes(self, Import, Center): + + self.Import = Import + self.Center = Center + self.projections = Import.projections + self.imported_viewer = BqImViewer_Projections_Parent() + self.imported_viewer.create_app() + self.altered_viewer = BqImViewer_Projections_Child(self.imported_viewer) + self.altered_viewer.create_app() + self.result_after_viewer = BqImViewer_Projections_Child(self.altered_viewer) + self.wd = None + self.log_handler, self.log = Import.log_handler, Import.log + self.downsample = False + self.ds_factor = 4 + self.copy_hists = True + self.shift_full_dataset_after = True + self.pyramid_level = 1 + self.num_iter = 10 + self.center = Center.current_center + self.upsample_factor = 50 + 
self.use_multiple_centers = False + self.extra_options = {} + self.num_batches = 20 + self.px_range_x = (0, 10) + self.px_range_y = (0, 10) + self.padding_x = 50 + self.padding_y = 20 + self.use_subset_correlation = False + self.pre_alignment_iters = 1 + self.tomopy_methods_list = [key for key in tomopy_recon_algorithm_kwargs] + self.tomopy_methods_list.remove("gridrec") + self.tomopy_methods_list.remove("fbp") + self.astra_cuda_methods_list = [ + key for key in astra_cuda_recon_algorithm_kwargs + ] + self.run_list = [] + self.header_font_style = { + "font_size": "22px", + "font_weight": "bold", + "font_variant": "small-caps", + # "text_color": "#0F52BA", + } + self.accordions_open = False + self.plot_output1 = Output() + + def init_widgets(self): + """ + Initializes many of the widgets in the Alignment and Recon tabs. + """ + self.button_font = {"font_size": "22px"} + self.button_layout = Layout(width="45px", height="40px") + + # -- Button to turn on tab --------------------------------------------- + self.open_accordions_button = Button( + icon="lock-open", + layout=self.button_layout, + style=self.button_font, + ) + + # -- Headers for plotting ------------------------------------- + self.import_plot_header = "Imported Projections" + self.import_plot_header = Label( + self.import_plot_header, style=self.header_font_style + ) + self.altered_plot_header = "Altered Projections" + self.altered_plot_header = Label( + self.altered_plot_header, style=self.header_font_style + ) + + # -- Headers for results ------------------------------------- + self.before_analysis_plot_header = "Analysis Projections" + self.before_analysis_plot_header = Label( + self.before_analysis_plot_header, style=self.header_font_style + ) + self.after_analysis_plot_header = "Result" + self.after_analysis_plot_header = Label( + self.after_analysis_plot_header, style=self.header_font_style + ) + + # -- Button to load metadata ---------------------------------------------- + 
self.load_metadata_button = Button( + description="Click to load metadata.", + icon="upload", + disabled=True, + button_style="info", # 'success', 'info', 'warning', 'danger' or '' + tooltip="First choose a metadata file in the Import tab, then click here", + layout=Layout(width="auto", justify_content="center"), + ) + + self.viewer_hbox = HBox( + [ + VBox( + [ + self.import_plot_header, + self.imported_viewer.app, + ], + layout=Layout(align_items="center"), + ), + VBox( + [ + self.altered_plot_header, + self.altered_viewer.app, + ], + layout=Layout(align_items="center"), + ), + ], + layout=Layout(justify_content="center"), + ) + + self.viewer_accordion = Accordion( + children=[self.viewer_hbox], + selected_index=None, + titles=("Narrow Data Range",), + ) + + # -- Saving Options ------------------------------------------------------- + self.save_opts = {key: False for key in self.save_opts_list} + self.save_opts_checkboxes = self.create_checkboxes_from_opt_list( + self.save_opts_list, self.save_opts + ) + + # -- Method Options ------------------------------------------------------- + self.methods_opts = { + key: False + for key in self.tomopy_methods_list + self.astra_cuda_methods_list + } + self.tomopy_methods_checkboxes = self.create_checkboxes_from_opt_list( + self.tomopy_methods_list, self.methods_opts + ) + self.astra_cuda_methods_checkboxes = self.create_checkboxes_from_opt_list( + self.astra_cuda_methods_list, self.methods_opts + ) + + # -- Options ---------------------------------------------------------- + + # Number of iterations + self.num_iterations_textbox = IntText( + description="Number of Iterations: ", + style=extend_description_style, + value=self.num_iter, + ) + + # Center + self.center_textbox = FloatText( + description="Center of Rotation: ", + style=extend_description_style, + value=self.center, + ) + center_link = link( + (self.center_textbox, "value"), (self.Center.center_textbox, "value") + ) + # Center + self.use_multiple_centers_checkbox 
= Checkbox( + description="Use multiple centers?", value=False + ) + # Downsampling + self.downsample_checkbox = Checkbox(description="Downsample?", value=False) + self.ds_factor_dropdown = Dropdown( + options=[("Original", -1), (2, 0), (4, 1), (8, 2)], + description="Downsample factor: ", + disabled=True, + style=extend_description_style, + ) + # Phase cross correlation subset (from altered projections) + self.use_subset_correlation_checkbox = Checkbox( + description="Phase Corr. Subset?", value=False + ) + + # Batch size + self.num_batches_textbox = IntText( + description="Number of batches (for GPU): ", + style=extend_description_style, + value=self.num_batches, + ) + + # X Padding + self.padding_x_textbox = IntText( + description="Padding X (px): ", + style=extend_description_style, + value=self.padding_x, + ) + + # Y Padding + self.padding_y_textbox = IntText( + description="Padding Y (px): ", + style=extend_description_style, + value=self.padding_y, + ) + + # Pre-alignment iterations + self.pre_alignment_iters_textbox = IntText( + description="Pre-alignment iterations: ", + style=extend_description_style, + value=self.pre_alignment_iters, + ) + + # Extra options + self.extra_options_textbox = Text( + description="Extra options: ", + placeholder='{"MinConstraint": 0}', + style=extend_description_style, + ) + + def refresh_plots(self): + self.imported_viewer.plot(self.projections, no_check=True) + self.altered_projections = Projections_Child(self.projections) + self.altered_viewer.projections = self.altered_projections + self.altered_viewer.copy_parent_projections() + + def set_observes(self): + + # -- Radio to turn on tab --------------------------------------------- + self.open_accordions_button.on_click(self.activate_tab) + + # -- Load metadata button --------------------------------------------- + self.load_metadata_button.on_click(self._load_metadata_all_on_click) + + # -- Options ---------------------------------------------------------- + + # Center + 
self.center_textbox.observe(self.update_center_textbox, names="value") + self.use_multiple_centers_checkbox.observe( + self.update_use_multiple_centers, names="value" + ) + + # Downsampling + self.downsample_checkbox.observe(self._downsample_turn_on) + self.altered_viewer.ds_viewer_dropdown.observe( + self.update_ds_factor_from_viewer, names="value" + ) + self.ds_factor_dropdown.observe(self.update_ds_factor, names="value") + + # Phase cross correlation subset (from altered projections) + self.use_subset_correlation_checkbox.observe( + self._use_subset_correlation, names="value" + ) + + # X Padding + self.padding_x_textbox.observe(self.update_x_padding, names="value") + + # Y Padding + self.padding_y_textbox.observe(self.update_y_padding, names="value") + + # Pre-alignment iterations + self.pre_alignment_iters_textbox.observe( + self.update_pre_alignment_iters, names="value" + ) + + # Extra options + self.extra_options_textbox.observe(self.update_extra_options, names="value") + + # Start button + self.start_button.on_click(self.set_options_and_run) + + # -- Radio to turn on tab --------------------------------------------- + def activate_tab(self, *args): + if self.accordions_open is False: + self.open_accordions_button.icon = "fa-lock" + self.open_accordions_button.button_style = "success" + self.projections = self.Import.projections + self.center = self.Center.current_center + self.center_textbox.value = self.Center.current_center + + self.load_metadata_button.disabled = False + self.start_button.disabled = False + self.save_options_accordion.selected_index = 0 + self.options_accordion.selected_index = 0 + self.methods_accordion.selected_index = 0 + self.viewer_accordion.selected_index = 0 + self.accordions_open = True + else: + self.open_accordions_button.icon = "fa-lock-open" + self.open_accordions_button.button_style = "info" + self.accordions_open = False + self.load_metadata_button.disabled = True + self.start_button.disabled = True + 
self.save_options_accordion.selected_index = None + self.options_accordion.selected_index = None + self.methods_accordion.selected_index = None + self.viewer_accordion.selected_index = None + self.log.info("Deactivated alignment.") + + # -- Load metadata button --------------------------------------------- + def _load_metadata_all_on_click(self, change): + self.load_metadata_button.button_style = "info" + self.load_metadata_button.icon = "fas fa-cog fa-spin fa-lg" + self.load_metadata_button.description = "Importing metadata." + self.load_metadata_align() + self.metadata.set_attributes_from_metadata() + self.set_observes() + self.load_metadata_button.button_style = "success" + self.load_metadata_button.icon = "fa-check-square" + self.load_metadata_button.description = "Finished importing metadata." + + # -- Button to start alignment ---------------------------------------- + def set_options_and_run(self, change): + change.button_style = "info" + change.icon = "fas fa-cog fa-spin fa-lg" + change.description = ( + "Setting options and loading data into alignment algorithm." + ) + self.run() + change.button_style = "success" + change.icon = "fa-check-square" + change.description = "Finished alignment." 
+ + # -- Options ---------------------------------------------------------- + + # Copy histogram from parent + def update_copy_hist(self, change): + self.copy_hists = change.new + + def update_shift_data(self, change): + self.shift_full_dataset_after = change.new + + # Number of iterations + def update_num_iter(self, change): + self.num_iter = int(change.new) + self.progress_total.max = change.new + + # Center of rotation + def update_center_textbox(self, change): + self.center = change.new + + def update_use_multiple_centers(self, change): + self.use_multiple_centers = change.new + + # Downsampling + def _downsample_turn_on(self, change): + if change.new is True: + self.downsample = True + self.pyramid_level = self.altered_viewer.ds_viewer_dropdown.value + self.ds_factor_dropdown.disabled = False + + if change.new is False: + self.downsample = False + self.ds_factor = 1 + self.ds_factor_dropdown.disabled = True + + + # Phase cross correlation subset (from altered projections) + def _use_subset_correlation(self, change): + self.use_subset_correlation = self.use_subset_correlation_checkbox.value + + def update_ds_factor_from_viewer(self, *args): + self.ds_factor_dropdown.value = self.altered_viewer.ds_viewer_dropdown.value + + def update_ds_factor(self, *args): + self.pyramid_level = self.ds_factor_dropdown.value + self.ds_factor = np.power(2, int(self.pyramid_level + 1)) + + # Batch size + def update_num_batches(self, change): + self.num_batches = change.new + self.progress_phase_cross_corr.max = change.new + self.progress_shifting.max = change.new + self.progress_reprj.max = change.new + + # X Padding + def update_x_padding(self, change): + self.padding_x = change.new + + # Y Padding + def update_y_padding(self, change): + self.padding_y = change.new + + # Pre-alignment iterations + def update_pre_alignment_iters(self, *args): + self.pre_alignment_iters = self.pre_alignment_iters_textbox.value + + # Extra options + def update_extra_options(self, change): + 
self.extra_options = change.new + + def set_checkbox_bool(self, checkbox_list, dictionary): + def create_opt_dict_on_check(change): + dictionary[change.owner.description] = change.new + + + for key in dictionary: + if dictionary[key]: + for checkbox in checkbox_list: + if checkbox.description == str(key): + checkbox.value = True + checkbox.observe(create_opt_dict_on_check, names="value") + elif not dictionary[key]: + for checkbox in checkbox_list: + if checkbox.description == str(key): + checkbox.value = False + checkbox.observe(create_opt_dict_on_check, names="value") + return checkbox_list + + def create_checkboxes_from_opt_list(self, opt_list, dictionary): + checkboxes = [MetaCheckbox(opt, dictionary, self) for opt in opt_list] + return [a.checkbox for a in checkboxes] # return list of checkboxes + + def plot_result(self): + with self.plot_output1: + self.plot_output1.clear_output(wait=True) + self.output_hbox = HBox( + [ + VBox( + [ + self.before_analysis_plot_header, + self.altered_viewer.app, + ], + layout=Layout(align_items="center"), + ), + VBox( + [ + self.after_analysis_plot_header, + self.result_after_viewer.app, + ], + layout=Layout(align_items="center"), + ), + ], + layout=Layout(justify_content="center"), + ) + display(self.output_hbox) + + def containerize(self): + # -- Saving ----------------------------------------------------------- + save_hbox = VBox( + self.save_opts_checkboxes, + layout=Layout(flex_flow="column wrap", align_items="flex-start"), + ) + + self.save_options_accordion = Accordion( + children=[save_hbox], + selected_index=None, + titles=("Save Options",), + ) + + # -- Methods ---------------------------------------------------------- + self.tomopy_methods_hbox = VBox( + [ + Label("Tomopy", style=self.header_font_style), + VBox( + self.tomopy_methods_checkboxes, + layout=Layout(flex_flow="column wrap", align_content="flex-start"), + ), + ], + layout=Layout(align_items="center") + ) + + self.astra_methods_hbox = VBox( + [ + 
Label("Astra", style=self.header_font_style), + VBox( + self.astra_cuda_methods_checkboxes, + layout=Layout(flex_flow="column wrap"), + ), + ], + layout=Layout(align_items="center") + ) + + recon_method_box = HBox( + [self.tomopy_methods_hbox, self.astra_methods_hbox], + layout=Layout(width="auto") + ) + self.methods_accordion = Accordion( + children=[recon_method_box], selected_index=None, titles=("Methods",) + ) + + # -- Box organization ------------------------------------------------- + + self.top_of_box_hb = HBox( + [self.open_accordions_button, self.Import.switch_data_buttons], + layout=Layout( + width="auto", + justify_content="flex-start", + ), + ) + self.start_button_hb = HBox( + [self.start_button], layout=Layout(width="auto", justify_content="center") + ) + + @abstractmethod + def update_num_batches(self, *args): + ... + + @abstractmethod + def update_num_iter(self, *args): + ... + + @abstractmethod + def run(self): + ... + + @abstractmethod + def make_tab(self): + ... + + # TODO: add @abstractmethod for loading metadata + + +class Align(AnalysisBase): + def __init__(self, Import, Center): + super().init_attributes(Import, Center) + self.metadata = Metadata_Align() + self.subset_x = None + self.subset_y = None + self.save_opts_list = ["Projections Before Alignment", "Projections After Alignment", "Reconstruction", "tiff", "hdf"] + self.Import.Align = self + self.init_widgets() + self.set_observes() + self.make_tab() + + def init_widgets(self): + super().init_widgets() + # -- Progress bars and plotting output -------------------------------- + self.progress_total = IntProgress(description="Recon: ", value=0, min=0, max=1) + self.progress_reprj = IntProgress(description="Reproj: ", value=0, min=0, max=1) + self.progress_phase_cross_corr = IntProgress( + description="Phase Corr: ", value=0, min=0, max=1 + ) + self.progress_shifting = IntProgress( + description="Shifting: ", value=0, min=0, max=1 + ) + self.plot_output2 = Output() + + # -- Button to start 
alignment ---------------------------------------- + self.start_button = Button( + description="After choosing all of the options above, click this button to start the alignment.", + disabled=True, + button_style="info", # 'success', 'info', 'warning', 'danger' or '' + tooltip="Start alignment.", + icon="", + layout=Layout(width="auto", justify_content="center"), + ) + # -- Upsample factor -------------------------------------------------- + self.upsample_factor_textbox = FloatText( + description="Upsample Factor: ", + style=extend_description_style, + value=self.upsample_factor, + ) + # Copy parent histograms? + self.copy_parent_hists_checkbox = Checkbox( + description="Copy parent histograms", value=True + ) + self.shift_data_after_checkbox = Checkbox( + description="Shift full dataset after", value=True + ) + self.save_opts_checkboxes.append(self.copy_parent_hists_checkbox) + self.save_opts_checkboxes.append(self.shift_data_after_checkbox) + # Use this alignment button + self.save_after_alignment = False + self.use_this_alignment_button = ReactiveTextButton( + self.use_this_alignment, + "Do you want to use this alignment for another alignment or reconstruction?", + "Downsampling and updating plots.", + "This alignment has been loaded into the app.", + ) + self.use_this_alignment_button.button.disabled=True + + def set_observes(self): + super().set_observes() + self.num_iterations_textbox.observe(self.update_num_iter, names="value") + self.num_batches_textbox.observe(self.update_num_batches, names="value") + self.upsample_factor_textbox.observe(self.update_upsample_factor, names="value") + self.start_button.on_click(self.set_options_and_run) + + # Copy parent histograms + self.copy_parent_hists_checkbox.observe(self.update_copy_hist, names="value") + # Shift dataset after + self.shift_data_after_checkbox.observe(self.update_shift_data, names="value") + + def use_this_alignment(self): + if self.analysis.saved_as_hdf: + pass + else: + self.save_after_alignment = 
True + self.analysis.skip_mk_wd_subdir = True + self.analysis.save_data_after() + self.save_after_alignment = False + self.Import.prenorm_uploader.quick_path_search.value = str(self.analysis.projections.filepath) + self.Import.prenorm_uploader.import_data() + self.Import.use_prenorm_button.run_callback() + self.start_button_hb.children = [self.start_button] + + # Upsampling + def update_upsample_factor(self, change): + self.upsample_factor = change.new + + def update_num_batches(self, change): + self.num_batches = change.new + self.progress_phase_cross_corr.max = change.new + self.progress_shifting.max = change.new + self.progress_reprj.max = change.new + + def update_num_iter(self, change): + self.num_iter = change.new + self.progress_total.max = change.new + + def update_subset(self): + self.subset_x = self.altered_viewer.subset_x + self.subset_y = self.altered_viewer.subset_y + + def run(self): + self.use_this_alignment_button.disable() + self.metadata = Metadata_Align() + self.metadata.set_metadata(self) + self.analysis = RunAlign(self) + self.result_after_viewer.create_app() + if self.copy_hists: + self.result_after_viewer.hist.copy_parent_hist() + else: + self.result_after_viewer.hist.precomputed_hist = None + self.result_after_viewer.link_plotted_projections() + self.result_after_viewer.link_plotted_projections_button.disabled = False + self.result_after_viewer.plot(self.analysis.projections, ds=False) + self.plot_result() + self.start_button_hb.children = [self.start_button, self.use_this_alignment_button.button] + self.use_this_alignment_button.enable() + + def make_tab(self): + + self.containerize() + + self.options_accordion = Accordion( + children=[ + HBox( + [ + HBox([self.center_textbox]), + HBox([self.num_iterations_textbox, self.pre_alignment_iters_textbox]), + HBox([self.padding_x_textbox, self.padding_y_textbox]), + HBox([self.downsample_checkbox, self.ds_factor_dropdown]), + self.use_subset_correlation_checkbox, + self.num_batches_textbox, + 
self.upsample_factor_textbox, + self.extra_options_textbox, + ], + layout=Layout(flex_flow="row wrap", justify_content="flex-start"), + ), + ], + selected_index=None, + titles=("Alignment Options",), + ) + + self.progress_hbox = HBox( + [ + self.progress_total, + self.progress_reprj, + self.progress_phase_cross_corr, + self.progress_shifting, + ], + layout=Layout(justify_content="center"), + ) + + self.tab = VBox( + children=[ + self.top_of_box_hb, + self.viewer_accordion, + # TODO: implement load metadata again + # self.load_metadata_button, + HBox([self.methods_accordion, + self.options_accordion, + self.save_options_accordion]), + self.start_button_hb, + self.progress_hbox, + VBox( + [self.plot_output1, self.plot_output2], + ), + ] + ) + + +class Recon(AnalysisBase): + def __init__(self, Import, Center): + super().init_attributes(Import, Center) + self.metadata = Metadata_Recon() + self.save_opts_list = ["Reconstruction"] + self.Import.Recon = self + self.init_widgets() + self.set_observes() + # self.metadata.set_metadata(self) + for save_opt in self.save_opts_checkboxes: + if save_opt.description == "Reconstruction": + save_opt.value = True + self.make_tab() + + def init_widgets(self): + super().init_widgets() + self.plot_output2 = Output() + + # -- Button to start alignment ---------------------------------------- + self.start_button = Button( + description="After choosing all of the options above, click this button to start the reconstruction.", + disabled=True, + button_style="info", # 'success', 'info', 'warning', 'danger' or '' + tooltip="Start reconstruction.", + icon="", + layout=Layout(width="auto", justify_content="center"), + ) + + def set_observes(self): + super().set_observes() + self.num_iterations_textbox.observe(self.update_num_iter, names="value") + + # TODO: implement load metadata + # def load_metadata(self): + # self.metadata = load_metadata( + # self.Import.filedir_recon, self.Import.filename_recon + # ) + # TODO: implement load metadata + # 
def set_widgets_from_load_metadata(self): + # super().set_widgets_from_load_metadata() + # self.init_widgets() + # self.metadata.set_metadata(self) + # self.make_tab() + + # Batch size + def update_num_batches(self, change): + self.num_batches = change.new + + # Number of iterations + def update_num_iter(self, change): + self.num_iter = change.new + + def run(self): + self.metadata = Metadata_Recon() + self.analysis = RunRecon(self) + self.result_after_viewer.create_app() + self.analysis.projections.data = self.analysis.recon + if self.copy_hists: + self.result_after_viewer.hist.copy_parent_hist() + print("copying hists") + else: + self.result_after_viewer.hist.precomputed_hist = None + self.result_after_viewer.plot(self.analysis.projections, ds=False) + self.plot_result() + + def make_tab(self): + self.containerize() + self.options_accordion = Accordion( + children=[ + VBox( + [ + HBox([self.use_multiple_centers_checkbox,self.center_textbox]), + self.num_iterations_textbox, + HBox([self.padding_x_textbox,self.padding_y_textbox]), + HBox([self.downsample_checkbox,self.ds_factor_dropdown]), + self.extra_options_textbox, + ], + ), + ], + selected_index=None, + titles=("Reconstruction Options",), + ) + + self.tab = VBox( + children=[ + self.top_of_box_hb, + self.viewer_accordion, + # TODO: implement load metadata again + # self.load_metadata_button, + HBox([self.methods_accordion, + self.options_accordion, + self.save_options_accordion]), + self.start_button_hb, + self.plot_output1, + ] + ) + + +class MetaCheckbox: + def __init__(self, description, dictionary, obj, disabled=False, value=False): + + self.checkbox = Checkbox( + description=description, value=value, disabled=disabled + ) + + def create_opt_dict_on_check(change): + dictionary[description] = change.new + # obj.metadata.set_metadata(obj) # obj needs a Metadata instance + + self.checkbox.observe(create_opt_dict_on_check, names="value") diff --git a/tomopyui/widgets/archive/alignment_box.py 
b/tomopyui/widgets/archive/alignment_box.py deleted file mode 100644 index 3adc56c..0000000 --- a/tomopyui/widgets/archive/alignment_box.py +++ /dev/null @@ -1,598 +0,0 @@ -from ipywidgets import * -import functools -import tomopy.data.tomodata as td -from tomopy.data.tomoalign import TomoAlign, init_new_from_prior - -# import tomopy.data.tomo as td - -# TODO: This is very disorganized. Try to bring some order/organization. - - -def alignment_box(meta, aligned_tomo_list, tomo, widget_linker, generalmetadata): - main_logger = generalmetadata["main_logger"] - main_handler = generalmetadata["main_handler"] - - projection_range_x_movie = widget_linker["projection_range_x_movie"] - projection_range_y_movie = widget_linker["projection_range_y_movie"] - projection_range_theta_movie = widget_linker["projection_range_theta_movie"] - skip_theta_movie = widget_linker["skip_theta_movie"] - - extend_description_style = {"description_width": "auto"} - - radio_align = RadioButtons( - options=["Yes", "No"], - style=extend_description_style, - layout=Layout(width="20%"), - value="No", - ) - radio_align_fulldataset = RadioButtons( - options=["Full", "Partial"], - style=extend_description_style, - layout=Layout(width="20%"), - disabled=True, - value="Full", - ) - # make sure the file choosers are going to the correct directory. 
- projection_range_x_alignment = IntRangeSlider( - value=[0, tomo.prj_imgs.shape[2] - 1], - min=0, - max=tomo.prj_imgs.shape[2] - 1, - step=1, - description="Projection X Range:", - disabled=True, - continuous_update=False, - orientation="horizontal", - readout=True, - readout_format="d", - layout=Layout(width="100%"), - style=extend_description_style, - ) - - projection_range_y_alignment = IntRangeSlider( - value=[0, tomo.prj_imgs.shape[1] - 1], - min=0, - max=tomo.prj_imgs.shape[1] - 1, - step=1, - description="Projection Y Range:", - disabled=True, - continuous_update=False, - orientation="horizontal", - readout=True, - readout_format="d", - layout=Layout(width="100%"), - style=extend_description_style, - ) - - load_range_from_above = Button( - description="Click to load projection range from above.", - disabled=True, - button_style="info", - tooltip="Make sure to choose all of the buttons above before clicking this button", - icon="", - layout=Layout(width="95%", justify_content="center"), - ) - - number_of_align_iterations = IntText( - description="Number of Iterations: ", style=extend_description_style, value=20, - ) - center_of_rotation = IntText( - description="Center of Rotation: ", - style=extend_description_style, - value=tomo.prj_imgs.shape[2] / 2, - ) - upsample_factor = IntText( - description="Upsample Factor: ", style=extend_description_style, value=1 - ) - batch_size = IntText( - description="Batch Size (!: if too small, can crash memory) ", - style=extend_description_style, - value=20, - layout=Layout(width="auto"), - ) - paddingX = IntText( - description="Padding X: ", style=extend_description_style, value=10 - ) - paddingY = IntText( - description="Padding Y: ", style=extend_description_style, value=10 - ) - extra_options = Text( - description="Extra options: ", - placeholder='{"MinConstraint": 0}', - style=extend_description_style, - ) - - align_start_button = Button( - description="After choosing all of the options above, click this button to 
start the alignment.", - disabled=True, - button_style="info", # 'success', 'info', 'warning', 'danger' or '' - tooltip="Make sure to choose all of the buttons above before clicking this button", - icon="", - layout=Layout(width="auto", justify_content="center"), - ) - - # enable the alignment gui if on. - def radio_align_true(change): - if change.new == 0: - radio_align_fulldataset.disabled = False - align_start_button.disabled = False - meta["aligndata"] = True - meta["methods"]["SIRT_CUDA"] = {} - other_options_accordion.selected_index = 0 - grid_methods_accordion.selected_index = 0 - save_options_accordion.selected_index = 0 - - elif change.new == 1: - radio_align_fulldataset.disabled = True - align_start_button.disabled = True - projection_range_x_alignment.disabled = True - projection_range_y_alignment.disabled = True - load_range_from_above.disabled = True - meta["aligndata"] = False - other_options_accordion.selected_index = None - grid_methods_accordion.selected_index = None - save_options_accordion.selected_index = None - - def radio_align_full_partial(change): - if change.new == 1: - projection_range_x_alignment.disabled = False - projection_range_y_alignment.disabled = False - load_range_from_above.disabled = False - meta["aligndata"] = True - load_range_from_above.description = ( - "Click to load projection range from above." - ) - load_range_from_above.icon = "" - elif change.new == 0: - if "range_y_link" in locals() or "range_y_link" in globals(): - range_y_link.unlink() - range_x_link.unlink() - load_range_from_above.button_style = "info" - load_range_from_above.description = ( - "Unlinked ranges. Enable partial range to link again." 
- ) - load_range_from_above.icon = "unlink" - projection_range_x_alignment.value = [0, tomo.prj_imgs.shape[2] - 1] - projection_range_x_alignment.disabled = True - projection_range_y_alignment.value = [0, tomo.prj_imgs.shape[1] - 1] - projection_range_y_alignment.disabled = True - load_range_from_above.disabled = True - meta["aligndata"] = False - - def load_range_from_above_onclick(self): - if self.button_style == "info": - global range_y_link, range_x_link - range_y_link = link( - (projection_range_y_movie, "value"), - (projection_range_y_alignment, "value"), - ) - range_x_link = link( - (projection_range_x_movie, "value"), - (projection_range_x_alignment, "value"), - ) - self.button_style = "success" - self.description = "Linked ranges. Click again to unlink." - self.icon = "link" - elif self.button_style == "success": - range_y_link.unlink() - range_x_link.unlink() - projection_range_x_alignment.value = [0, tomo.prj_imgs.shape[2] - 1] - projection_range_y_alignment.value = [0, tomo.prj_imgs.shape[1] - 1] - self.button_style = "info" - self.description = "Unlinked ranges. Click again to link." - self.icon = "unlink" - - method_output = Output() - output0 = Output() - output1 = Output() - output2 = Output() - - #################### START ALIGNMENT ###################################### - def set_options_and_run_align(self): - self.icon = "fas fa-cog fa-spin fa-lg" - self.description = "Setting options and loading data into alignment algorithm." 
- meta["opts"]["num_iter"] = number_of_align_iterations.value - meta["opts"]["center"] = center_of_rotation.value - meta["opts"]["prj_range_x"] = projection_range_x_alignment.value - meta["opts"]["prj_range_y"] = projection_range_y_alignment.value - meta["opts"]["upsample_factor"] = upsample_factor.value - meta["opts"]["pad"] = ( - paddingX.value, - paddingY.value, - ) - meta["opts"]["batch_size"] = batch_size.value - meta["opts"]["extra_options"] = extra_options.value - meta["callbacks"]["button"] = self - meta["callbacks"]["methodoutput"] = method_output - meta["callbacks"]["output0"] = output0 - meta["callbacks"]["output1"] = output1 - meta["callbacks"]["output2"] = output2 - meta["opts"]["downsample"] = downsample_checkbox.value - meta["opts"]["downsample_factor"] = downsample_factor_text.value - if len(meta["methods"]) > 1: - meta["alignmultiple"] = True - try: - self.description = "Aligning your data." - align_number = meta["align_number"] - if align_number == 0: - aligned_tomo_list.append(TomoAlign(tomo, meta)) - else: - aligned_tomo_list.append( - init_new_from_prior(aligned_tomo_list[align_number - 1], meta) - ) - self.button_style = "success" - self.icon = "fa-check-square" - self.description = "Finished alignment. Click to run second alignment with updated parameters." - meta["align_number"] += 1 - except: - with output0: - self.button_style = "warning" - self.icon = "exclamation-triangle" - self.description = "Something went wrong." 
- - ############################# METHOD CHOOSER GRID ############################ - grid_alignment = GridspecLayout(2, 3) - # align_FBP_CUDA = Checkbox(description="FBP_CUDA") - # align_FBP_CUDA_option1 = Checkbox(description="option1", disabled=True) - # align_FBP_CUDA_option2 = Checkbox(description="option2", disabled=True) - # align_FBP_CUDA_option3 = Checkbox(description="option3", disabled=True) - # align_FBP_CUDA_option_list = [ - # align_FBP_CUDA_option1, - # align_FBP_CUDA_option2, - # align_FBP_CUDA_option3, - # ] - - align_SIRT_CUDA = Checkbox(description="SIRT_CUDA", value=1) - align_SIRT_CUDA_option1 = Checkbox(description="Faster", disabled=False) - align_SIRT_CUDA_option2 = Checkbox(description="Fastest", disabled=False) - align_SIRT_CUDA_option3 = Checkbox(description="option3", disabled=False) - align_SIRT_CUDA_option_list = [align_SIRT_CUDA_option1, align_SIRT_CUDA_option2] - - # align_SART_CUDA = Checkbox(description="SART_CUDA") - # align_SART_CUDA_option1 = Checkbox(description="option1", disabled=True) - # align_SART_CUDA_option2 = Checkbox(description="option2", disabled=True) - # align_SART_CUDA_option3 = Checkbox(description="option3", disabled=True) - # align_SART_CUDA_option_list = [ - # align_SART_CUDA_option1, - # align_SART_CUDA_option2, - # align_SART_CUDA_option3, - # ] - - # align_CGLS_CUDA = Checkbox(description="CGLS_CUDA") - # align_CGLS_CUDA_option1 = Checkbox(description="option1", disabled=True) - # align_CGLS_CUDA_option2 = Checkbox(description="option2", disabled=True) - # align_CGLS_CUDA_option3 = Checkbox(description="option3", disabled=True) - # align_CGLS_CUDA_option_list = [ - # align_CGLS_CUDA_option1, - # align_CGLS_CUDA_option2, - # align_CGLS_CUDA_option3, - # ] - - # align_MLEM_CUDA = Checkbox(description="MLEM_CUDA") - # align_MLEM_CUDA_option1 = Checkbox(description="option1", disabled=True) - # align_MLEM_CUDA_option2 = Checkbox(description="option2", disabled=True) - # align_MLEM_CUDA_option3 = 
Checkbox(description="option3", disabled=True) - # align_MLEM_CUDA_option_list = [ - # align_MLEM_CUDA_option1, - # align_MLEM_CUDA_option2, - # align_MLEM_CUDA_option3, - # ] - - align_method_list = [ - # align_FBP_CUDA, - align_SIRT_CUDA, - # align_SART_CUDA, - # align_CGLS_CUDA, - # align_MLEM_CUDA, - ] - - def toggle_on(change, opt_list, dictname): - if change.new == 1: - meta["methods"][dictname] = {} - for option in opt_list: - option.disabled = False - if change.new == 0: - meta["methods"].pop(dictname) - for option in opt_list: - option.value = 0 - option.disabled = True - - # align_FBP_CUDA.observe( - # functools.partial( - # toggle_on, opt_list=align_FBP_CUDA_option_list, dictname="FBP_CUDA" - # ), - # names=["value"], - # ) - align_SIRT_CUDA.observe( - functools.partial( - toggle_on, opt_list=align_SIRT_CUDA_option_list, dictname="SIRT_CUDA" - ), - names=["value"], - ) - # align_SART_CUDA.observe( - # functools.partial( - # toggle_on, opt_list=align_SART_CUDA_option_list, dictname="SART_CUDA" - # ), - # names=["value"], - # ) - # align_CGLS_CUDA.observe( - # functools.partial( - # toggle_on, opt_list=align_CGLS_CUDA_option_list, dictname="CGLS_CUDA" - # ), - # names=["value"], - # ) - # align_MLEM_CUDA.observe( - # functools.partial( - # toggle_on, opt_list=align_MLEM_CUDA_option_list, dictname="MLEM_CUDA" - # ), - # names=["value"], - # ) - - def create_option_dictionary(opt_list): - opt_dictionary = {opt.description: opt.value for opt in opt_list} - return opt_dictionary - - def create_dict_on_checkmark(change, opt_list, dictname): - meta["methods"][dictname] = create_option_dictionary(opt_list) - - # Makes generator for mapping of options to observe functions. 
- - # list( - # ( - # opt.observe( - # functools.partial( - # create_dict_on_checkmark, - # opt_list=align_FBP_CUDA_option_list, - # dictname="FBP_CUDA", - # ), - # names=["value"], - # ) - # for opt in align_FBP_CUDA_option_list - # ) - # ) - list( - ( - opt.observe( - functools.partial( - create_dict_on_checkmark, - opt_list=align_SIRT_CUDA_option_list, - dictname="SIRT_CUDA", - ), - names=["value"], - ) - for opt in align_SIRT_CUDA_option_list - ) - ) - # list( - # ( - # opt.observe( - # functools.partial( - # create_dict_on_checkmark, - # opt_list=align_SART_CUDA_option_list, - # dictname="SART_CUDA", - # ), - # names=["value"], - # ) - # for opt in align_SART_CUDA_option_list - # ) - # ) - # list( - # ( - # opt.observe( - # functools.partial( - # create_dict_on_checkmark, - # opt_list=align_CGLS_CUDA_option_list, - # dictname="CGLS_CUDA", - # ), - # names=["value"], - # ) - # for opt in align_CGLS_CUDA_option_list - # ) - # ) - # list( - # ( - # opt.observe( - # functools.partial( - # create_dict_on_checkmark, - # opt_list=align_MLEM_CUDA_option_list, - # dictname="MLEM_CUDA", - # ), - # names=["value"], - # ) - # for opt in align_MLEM_CUDA_option_list - # ) - # ) - - def fill_grid(method, opt_list, linenumber, grid): - grid[linenumber, 0] = method - i = 1 - for option in opt_list: - grid[linenumber, i] = option - i += 1 - - # fill_grid(align_FBP_CUDA, align_FBP_CUDA_option_list, 1, grid_alignment) - fill_grid(align_SIRT_CUDA, align_SIRT_CUDA_option_list, 1, grid_alignment) - # fill_grid(align_SART_CUDA, align_SART_CUDA_option_list, 3, grid_alignment) - # fill_grid(align_CGLS_CUDA, align_CGLS_CUDA_option_list, 4, grid_alignment) - # fill_grid(align_MLEM_CUDA, align_MLEM_CUDA_option_list, 5, grid_alignment) - - grid_column_headers = ["Method", "Option 1", "Option 2"] - for i, method in enumerate(grid_column_headers): - grid_alignment[0, i] = Label( - value=grid_column_headers[i], layout=Layout(justify_content="center") - ) - 
##############################Alignment start button???####################### - radio_align.observe(radio_align_true, names="index") - radio_align_fulldataset.observe(radio_align_full_partial, names="index") - load_range_from_above.on_click(load_range_from_above_onclick) - align_start_button.on_click(set_options_and_run_align) - - #######################DOWNSAMPLE CHECKBOX############################ - def downsample_turn_on(change): - if change.new == 1: - meta["opts"]["downsample"] = True - downsample_factor_text.disabled = False - if change.new == 0: - meta["opts"]["downsample"] = False - downsample_factor_text.disabled = True - - downsample_checkbox = Checkbox(description="Downsample?", value=0) - meta["opts"]["downsample"] = False - downsample_checkbox.observe(downsample_turn_on) - - downsample_factor_text = BoundedFloatText( - value=0.5, - min=0.001, - max=1.0, - description="Downsampling factor:", - disabled=True, - style=extend_description_style, - ) - - ######################SAVING OPTIONS######################## - - def create_option_dictionary(opt_list): - opt_dictionary = {opt.description: opt.value for opt in opt_list} - return opt_dictionary - - def create_save_dict_on_checkmark(change, opt_list): - meta["save_opts"] = create_option_dictionary(opt_list) - - save_opts = ["tomo_after", "tomo_before", "recon", "tiff", "npy"] - - def create_save_checkboxes(opts): - checkboxes = [ - Checkbox(description=opt, style=extend_description_style,) for opt in opts - ] - return checkboxes - - save_checkboxes = create_save_checkboxes(save_opts) - - list( - ( - opt.observe( - functools.partial( - create_save_dict_on_checkmark, opt_list=save_checkboxes, - ), - names=["value"], - ) - for opt in save_checkboxes - ) - ) - - save_hbox = HBox( - save_checkboxes, - layout=Layout(flex_wrap="wrap", justify_content="space-between"), - ) - - save_options_accordion = Accordion( - children=[save_hbox], - selected_index=None, - layout=Layout(width="100%"), - titles=("Save 
options",), - ) - - #################### ALIGNMENT BOX ORGANIZATION ######################## - radio_description = ( - "Would you like to try automatic alignment before reconstruction?" - ) - partial_radio_description = ( - "Would you like to use the full dataset, or a partial dataset?" - ) - radio_description = HTML( - value="

" - + radio_description - + "

" - ) - partial_radio_description = HTML( - value="

" - + partial_radio_description - + "

" - ) - - pixel_range_slider_vb = VBox( - [ - HBox( - [load_range_from_above], - justify_content="center", - align_content="center", - ), - projection_range_x_alignment, - projection_range_y_alignment, - ], - layout=Layout(width="30%"), - justify_content="center", - align_items="space-between", - ) - - hb1 = HBox( - [ - radio_description, - radio_align, - partial_radio_description, - radio_align_fulldataset, - pixel_range_slider_vb, - ], - layout=Layout( - width="auto", - justify_content="center", - align_items="center", - flex="flex-grow", - ), - ) - hb2 = HBox( - [align_start_button], layout=Layout(width="auto", justify_content="center") - ) - grid_methods_accordion = Accordion( - children=[grid_alignment], selected_index=None, titles=("Alignment Methods",) - ) - - other_options_accordion = Accordion( - children=[ - VBox( - [ - HBox( - [ - number_of_align_iterations, - center_of_rotation, - upsample_factor, - ], - layout=Layout( - flex_wrap="wrap", justify_content="space-between" - ), - ), - HBox( - [ - batch_size, - paddingX, - paddingY, - downsample_checkbox, - downsample_factor_text, - ], - layout=Layout( - flex_wrap="wrap", justify_content="space-between" - ), - ), - extra_options, - ], - layout=Layout(width="100%", height="100%"), - ) - ], - selected_index=None, - layout=Layout(width="100%"), - titles=("Other Alignment Options",), - ) - - box = VBox( - children=[ - hb1, - grid_methods_accordion, - save_options_accordion, - other_options_accordion, - hb2, - method_output, - output1, - output2, - ] - ) - - return box diff --git a/tomopyui/widgets/archive/alignment_stuff_old.py b/tomopyui/widgets/archive/alignment_stuff_old.py deleted file mode 100644 index 0021e1e..0000000 --- a/tomopyui/widgets/archive/alignment_stuff_old.py +++ /dev/null @@ -1,232 +0,0 @@ - # tool for loading the range from above. 
- - - load_range_from_above = Button( - description="Click to load projection range from plot tab.", - disabled=True, - button_style="info", - tooltip="Make sure to choose all of the buttons above before clicking this button", - icon="", - layout=Layout(width="95%", justify_content="center"), - ) - load_range_from_above.on_click(load_range_from_above_onclick) - - -def radio_align_full_partial(change): - if change.new == 1: - projection_range_x_alignment.disabled = False - projection_range_y_alignment.disabled = False - load_range_from_above.disabled = False - self.metadata["aligndata"] = True - load_range_from_above.description = ( - "Click to load projection range from above." - ) - load_range_from_above.icon = "" - elif change.new == 0: - if "range_y_link" in locals() or "range_y_link" in globals(): - range_y_link.unlink() - range_x_link.unlink() - load_range_from_above.button_style = "info" - load_range_from_above.description = ( - "Unlinked ranges. Enable partial range to link again." 
- ) - load_range_from_above.icon = "unlink" - projection_range_x_alignment.value = [0, tomo.prj_imgs.shape[2] - 1] - projection_range_x_alignment.disabled = True - projection_range_y_alignment.value = [0, tomo.prj_imgs.shape[1] - 1] - projection_range_y_alignment.disabled = True - load_range_from_above.disabled = True - self.metadata["aligndata"] = False - - - ############################# METHOD CHOOSER GRID ############################ - grid_alignment = GridspecLayout(2, 3) - # align_FBP_CUDA = Checkbox(description="FBP_CUDA") - # align_FBP_CUDA_option1 = Checkbox(description="option1", disabled=True) - # align_FBP_CUDA_option2 = Checkbox(description="option2", disabled=True) - # align_FBP_CUDA_option3 = Checkbox(description="option3", disabled=True) - # align_FBP_CUDA_option_list = [ - # align_FBP_CUDA_option1, - # align_FBP_CUDA_option2, - # align_FBP_CUDA_option3, - # ] - - align_SIRT_CUDA = Checkbox(description="SIRT_CUDA", value=1) - align_SIRT_CUDA_option1 = Checkbox(description="Faster", disabled=False) - align_SIRT_CUDA_option2 = Checkbox(description="Fastest", disabled=False) - align_SIRT_CUDA_option3 = Checkbox(description="option3", disabled=False) - align_SIRT_CUDA_option_list = [align_SIRT_CUDA_option1, align_SIRT_CUDA_option2] - - # align_SART_CUDA = Checkbox(description="SART_CUDA") - # align_SART_CUDA_option1 = Checkbox(description="option1", disabled=True) - # align_SART_CUDA_option2 = Checkbox(description="option2", disabled=True) - # align_SART_CUDA_option3 = Checkbox(description="option3", disabled=True) - # align_SART_CUDA_option_list = [ - # align_SART_CUDA_option1, - # align_SART_CUDA_option2, - # align_SART_CUDA_option3, - # ] - - # align_CGLS_CUDA = Checkbox(description="CGLS_CUDA") - # align_CGLS_CUDA_option1 = Checkbox(description="option1", disabled=True) - # align_CGLS_CUDA_option2 = Checkbox(description="option2", disabled=True) - # align_CGLS_CUDA_option3 = Checkbox(description="option3", disabled=True) - # 
align_CGLS_CUDA_option_list = [ - # align_CGLS_CUDA_option1, - # align_CGLS_CUDA_option2, - # align_CGLS_CUDA_option3, - # ] - - # align_MLEM_CUDA = Checkbox(description="MLEM_CUDA") - # align_MLEM_CUDA_option1 = Checkbox(description="option1", disabled=True) - # align_MLEM_CUDA_option2 = Checkbox(description="option2", disabled=True) - # align_MLEM_CUDA_option3 = Checkbox(description="option3", disabled=True) - # align_MLEM_CUDA_option_list = [ - # align_MLEM_CUDA_option1, - # align_MLEM_CUDA_option2, - # align_MLEM_CUDA_option3, - # ] - - align_method_list = [ - # align_FBP_CUDA, - align_SIRT_CUDA, - # align_SART_CUDA, - # align_CGLS_CUDA, - # align_MLEM_CUDA, - ] - - def toggle_on(change, opt_list, dictname): - if change.new == 1: - self.metadata["methods"][dictname] = {} - for option in opt_list: - option.disabled = False - if change.new == 0: - self.metadata["methods"].pop(dictname) - for option in opt_list: - option.value = 0 - option.disabled = True - - # align_FBP_CUDA.observe( - # functools.partial( - # toggle_on, opt_list=align_FBP_CUDA_option_list, dictname="FBP_CUDA" - # ), - # names=["value"], - # ) - align_SIRT_CUDA.observe( - functools.partial( - toggle_on, opt_list=align_SIRT_CUDA_option_list, dictname="SIRT_CUDA" - ), - names=["value"], - ) - # align_SART_CUDA.observe( - # functools.partial( - # toggle_on, opt_list=align_SART_CUDA_option_list, dictname="SART_CUDA" - # ), - # names=["value"], - # ) - # align_CGLS_CUDA.observe( - # functools.partial( - # toggle_on, opt_list=align_CGLS_CUDA_option_list, dictname="CGLS_CUDA" - # ), - # names=["value"], - # ) - # align_MLEM_CUDA.observe( - # functools.partial( - # toggle_on, opt_list=align_MLEM_CUDA_option_list, dictname="MLEM_CUDA" - # ), - # names=["value"], - # ) - - def create_option_dictionary(opt_list): - opt_dictionary = {opt.description: opt.value for opt in opt_list} - return opt_dictionary - - def create_dict_on_checkmark(change, opt_list, dictname): - self.metadata["methods"][dictname] = 
create_option_dictionary(opt_list) - - # Makes generator for mapping of options to observe functions. - - # list( - # ( - # opt.observe( - # functools.partial( - # create_dict_on_checkmark, - # opt_list=align_FBP_CUDA_option_list, - # dictname="FBP_CUDA", - # ), - # names=["value"], - # ) - # for opt in align_FBP_CUDA_option_list - # ) - # ) - list( - ( - opt.observe( - functools.partial( - create_dict_on_checkmark, - opt_list=align_SIRT_CUDA_option_list, - dictname="SIRT_CUDA", - ), - names=["value"], - ) - for opt in align_SIRT_CUDA_option_list - ) - ) - # list( - # ( - # opt.observe( - # functools.partial( - # create_dict_on_checkmark, - # opt_list=align_SART_CUDA_option_list, - # dictname="SART_CUDA", - # ), - # names=["value"], - # ) - # for opt in align_SART_CUDA_option_list - # ) - # ) - # list( - # ( - # opt.observe( - # functools.partial( - # create_dict_on_checkmark, - # opt_list=align_CGLS_CUDA_option_list, - # dictname="CGLS_CUDA", - # ), - # names=["value"], - # ) - # for opt in align_CGLS_CUDA_option_list - # ) - # ) - # list( - # ( - # opt.observe( - # functools.partial( - # create_dict_on_checkmark, - # opt_list=align_MLEM_CUDA_option_list, - # dictname="MLEM_CUDA", - # ), - # names=["value"], - # ) - # for opt in align_MLEM_CUDA_option_list - # ) - # ) - - def fill_grid(method, opt_list, linenumber, grid): - grid[linenumber, 0] = method - i = 1 - for option in opt_list: - grid[linenumber, i] = option - i += 1 - - # fill_grid(align_FBP_CUDA, align_FBP_CUDA_option_list, 1, grid_alignment) - fill_grid(align_SIRT_CUDA, align_SIRT_CUDA_option_list, 1, grid_alignment) - # fill_grid(align_SART_CUDA, align_SART_CUDA_option_list, 3, grid_alignment) - # fill_grid(align_CGLS_CUDA, align_CGLS_CUDA_option_list, 4, grid_alignment) - # fill_grid(align_MLEM_CUDA, align_MLEM_CUDA_option_list, 5, grid_alignment) - - grid_column_headers = ["Method", "Option 1", "Option 2"] - for i, method in enumerate(grid_column_headers): - grid_alignment[0, i] = Label( - 
value=grid_column_headers[i], layout=Layout(justify_content="center") - ) \ No newline at end of file diff --git a/tomopyui/widgets/archive/generate_alignment_box.py b/tomopyui/widgets/archive/generate_alignment_box.py deleted file mode 100644 index f56a447..0000000 --- a/tomopyui/widgets/archive/generate_alignment_box.py +++ /dev/null @@ -1,470 +0,0 @@ -import tifffile as tf -from ipywidgets import * -import glob -from .debouncer import debounce -import functools -import json - - -def generate_alignment_box(recon_tomo_metadata): - - extend_description_style = {"description_width": "auto"} - fpath = recon_tomo_metadata["fpath"] - fname = recon_tomo_metadata["fname"] - recon_tomo_metadata["opts"] = {} - recon_tomo_metadata["methods"] = {} - recon_tomo_metadata["save_opts"] = {} - - # tomo_number = int(filter(str.isdigit, box_title)) - - radio_alignment = RadioButtons( - options=["Yes", "No"], - style=extend_description_style, - layout=Layout(width="20%"), - value="No", - ) - - radio_alignment_fulldataset = RadioButtons( - options=["Full", "Partial"], - style=extend_description_style, - layout=Layout(width="20%"), - disabled=True, - value="Full", - ) - - projection_range_x_recon = IntRangeSlider( - value=[0, 10], - min=0, - max=10, - step=1, - description="Projection X Range:", - disabled=True, - continuous_update=False, - orientation="horizontal", - readout=True, - readout_format="d", - layout=Layout(width="100%"), - style=extend_description_style, - ) - - projection_range_y_recon = IntRangeSlider( - value=[0, 10], - min=0, - max=10, - step=1, - description="Projection Y Range:", - disabled=True, - continuous_update=False, - orientation="horizontal", - readout=True, - readout_format="d", - layout=Layout(width="100%"), - style=extend_description_style, - ) - - def activate_box(change): - if change.new == 0: - radio_alignment_fulldataset.disabled = False - recon_tomo_metadata["reconstruct"] = True - recon_tomo_metadata["opts"] = {} - recon_tomo_metadata["methods"] = 
{} - recon_tomo_metadata["save_opts"] = {} - save_options_accordion.selected_index = 0 - # recon_tomo_metadata["methods"]["SIRT_CUDA"] = {} - options_accordion.selected_index = 0 - methods_accordion.selected_index = 0 - elif change.new == 1: - radio_alignment_fulldataset.disabled = True - projection_range_x_recon.disabled = True - projection_range_y_recon.disabled = True - recon_tomo_metadata["reconstruct"] = False - recon_tomo_metadata.pop("opts") - recon_tomo_metadata.pop("methods") - recon_tomo_metadata.pop("save_opts") - save_options_accordion.selected_index = None - options_accordion.selected_index = None - methods_accordion.selected_index = None - - def set_projection_ranges(sizeY, sizeX): - projection_range_x_recon.max = sizeX - 1 - projection_range_y_recon.max = sizeY - 1 - # projection_range_z_recon.max = sizeZ-1 - projection_range_x_recon.value = [0, sizeX - 1] - projection_range_y_recon.value = [0, sizeY - 1] - # projection_range_z_recon.value = [0, sizeZ-1] - recon_tomo_metadata["prj_range_x"] = projection_range_x_recon.value - recon_tomo_metadata["prj_range_y"] = projection_range_y_recon.value - - def load_tif_shape_tag(folder_import=False): - os.chdir(fpath) - tiff_count_in_folder = len(glob.glob1(fpath, "*.tif")) - global sizeY, sizeX - if folder_import: - _tomo = td.TomoData(metadata=recon_tomo_metadata) - size = _tomo.prj_imgs.shape - # sizeZ = size[0] - sizeY = size[1] - sizeX = size[2] - set_projection_ranges(sizeY, sizeX) - - else: - with tf.TiffFile(fname) as tif: - if tiff_count_in_folder > 50: - sizeX = tif.pages[0].tags["ImageWidth"].value - sizeY = tif.pages[0].tags["ImageLength"].value - # sizeZ = tiff_count_in_folder # can maybe use this later - else: - imagesize = tif.pages[0].tags["ImageDescription"] - size = json.loads(imagesize.value)["shape"] - sizeY = size[1] - sizeX = size[2] - set_projection_ranges(sizeY, sizeX) - - def load_npy_shape(): - os.chdir(fpath) - size = np.load(fname, mmap_mode="r").shape - global sizeY, sizeX - sizeY = 
size[1] - sizeX = size[2] - set_projection_ranges(sizeY, sizeX) - - def activate_full_partial(change): - if change.new == 1: - recon_tomo_metadata["partial"] = True - projection_range_x_recon.disabled = False - projection_range_y_recon.disabled = False - # projection_range_z_recon.disabled = False - if fname != "": - if fname.__contains__(".tif"): - load_tif_shape_tag() - elif recon_tomo_metadata["fname"].__contains__(".npy"): - load_npy_shape() - else: - load_tif_shape_tag(folder_import=True) - elif change.new == 0: - recon_tomo_metadata["partial"] = False - set_projection_ranges(sizeY, sizeX) - projection_range_x_recon.disabled = True - projection_range_y_recon.disabled = True - # projection_range_z_recon.disabled = True - - recon_tomo_metadata["partial"] = False - radio_alignment.observe(activate_box, names="index") - radio_alignment_fulldataset.observe(activate_full_partial, names="index") - - #### callbacks for projection range sliders - - @debounce(0.2) - def projection_range_x_update_dict(change): - recon_tomo_metadata["prj_range_x"] = change.new - - projection_range_x_recon.observe(projection_range_x_update_dict, "value") - - @debounce(0.2) - def projection_range_y_update_dict(change): - recon_tomo_metadata["prj_range_y"] = change.new - - projection_range_y_recon.observe(projection_range_y_update_dict, "value") - - #### downsampling - recon_tomo_metadata["opts"]["downsample"] = False - recon_tomo_metadata["opts"]["downsample_factor"] = 1 - - def downsample_turn_on(change): - if change.new == 1: - recon_tomo_metadata["opts"]["downsample"] = True - recon_tomo_metadata["opts"][ - "downsample_factor" - ] = downsample_factor_text.value - downsample_factor_text.disabled = False - if change.new == 0: - recon_tomo_metadata["opts"]["downsample"] = False - recon_tomo_metadata["opts"]["downsample_factor"] = 1 - downsample_factor_text.value = 1 - downsample_factor_text.disabled = True - - downsample_checkbox = Checkbox(description="Downsample?", value=0) - 
downsample_checkbox.observe(downsample_turn_on) - - def downsample_factor_update_dict(change): - recon_tomo_metadata["opts"]["downsample_factor"] = change.new - - downsample_factor_text = BoundedFloatText( - value=1, - min=0.001, - max=1.0, - description="Downsampling factor:", - disabled=True, - style=extend_description_style, - ) - - downsample_factor_text.observe(downsample_factor_update_dict, names="value") - - #### radio descriptions - - radio_description = "Would you like to reconstruct this dataset?" - partial_radio_description = ( - "Would you like to use the full dataset, or a partial dataset?" - ) - radio_description = HTML( - value="

" - + radio_description - + "

" - ) - partial_radio_description = HTML( - value="

" - + partial_radio_description - + "

" - ) - - #### Saving options - - def create_option_dictionary(opt_list): - opt_dictionary = {opt.description: opt.value for opt in opt_list} - return opt_dictionary - - def create_save_dict_on_checkmark(change, opt_list): - recon_tomo_metadata["save_opts"] = create_option_dictionary(opt_list) - - save_opts = ["tomo_before", "recon", "tiff", "npy"] - recon_tomo_metadata["save_opts"] = {key: None for key in save_opts} - - def create_save_checkboxes(opts): - checkboxes = [ - Checkbox(description=opt, style=extend_description_style,) for opt in opts - ] - return checkboxes - - save_checkboxes = create_save_checkboxes(save_opts) - - list( - ( - opt.observe( - functools.partial( - create_save_dict_on_checkmark, opt_list=save_checkboxes, - ), - names=["value"], - ) - for opt in save_checkboxes - ) - ) - - save_hbox = HBox( - save_checkboxes, - layout=Layout(flex_wrap="wrap", justify_content="space-between"), - ) - - save_options_accordion = Accordion( - children=[save_hbox], - selected_index=None, - layout=Layout(width="100%"), - titles=("Save Options",), - ) - - #### Methods checkboxes - - recon_FP_CUDA = Checkbox(description="FP_CUDA") - recon_BP_CUDA = Checkbox(description="BP_CUDA") - recon_FBP_CUDA = Checkbox(description="FBP_CUDA") - ### !!!!!!!! 
sirt cuda has options - maybe make them into a radio chooser - recon_SIRT_CUDA = Checkbox(description="SIRT_CUDA") - recon_SIRT_CUDA_option1 = Checkbox(description="SIRT Plugin-Faster", disabled=False) - recon_SIRT_CUDA_option2 = Checkbox(description="SIRT 3D-Fastest", disabled=False) - recon_SIRT_CUDA_option_list = [ - recon_SIRT_CUDA_option1, - recon_SIRT_CUDA_option2, - ] - recon_SIRT_CUDA_checkboxes = [ - recon_SIRT_CUDA, - recon_SIRT_CUDA_option1, - recon_SIRT_CUDA_option2, - ] - recon_SART_CUDA = Checkbox(description="SART_CUDA") - recon_CGLS_CUDA = Checkbox(description="CGLS_CUDA") - recon_MLEM_CUDA = Checkbox(description="MLEM_CUDA") - recon_method_list = [ - recon_FP_CUDA, - recon_BP_CUDA, - recon_FBP_CUDA, - recon_SART_CUDA, - recon_CGLS_CUDA, - recon_MLEM_CUDA, - ] - - ####### Toggling on options if you select SIRT. Copy the observe function below - ######## if more options are needed. - - def toggle_on(change, opt_list, dictname): - if change.new == 1: - recon_tomo_metadata["methods"][dictname] = {} - for option in opt_list: - option.disabled = False - if change.new == 0: - recon_tomo_metadata["methods"].pop(dictname) - for option in opt_list: - option.value = 0 - option.disabled = True - - recon_SIRT_CUDA.observe( - functools.partial( - toggle_on, opt_list=recon_SIRT_CUDA_option_list, dictname="SIRT_CUDA" - ), - names=["value"], - ) - - def create_option_dictionary(opt_list): - opt_dictionary = {opt.description: opt.value for opt in opt_list} - return opt_dictionary - - def create_dict_on_checkmark(change, opt_list, dictname): - recon_tomo_metadata["methods"][dictname] = create_option_dictionary(opt_list) - - def create_dict_on_checkmark_no_options(change): - if change.new == True: - recon_tomo_metadata["methods"][change.owner.description] = {} - if change.new == False: - recon_tomo_metadata["methods"].pop(change.owner.description) - - [ - checkbox.observe(create_dict_on_checkmark_no_options) - for checkbox in recon_method_list - ] - # Makes generator 
for mapping of options to observe functions. - # If other options needed for other reconstruction methods, use similar - - list( - ( - opt.observe( - functools.partial( - create_dict_on_checkmark, - opt_list=recon_SIRT_CUDA_option_list, - dictname="SIRT_CUDA", - ), - names=["value"], - ) - for opt in recon_SIRT_CUDA_option_list - ) - ) - - list( - ( - opt.observe( - functools.partial( - create_dict_on_checkmark, - opt_list=recon_SIRT_CUDA_option_list, - dictname="SIRT_CUDA", - ), - names=["value"], - ) - for opt in recon_SIRT_CUDA_option_list - ) - ) - - sirt_hbox = HBox(recon_SIRT_CUDA_checkboxes) - - recon_method_box = VBox( - [ - VBox(recon_method_list, layout=widgets.Layout(flex_flow="row wrap")), - sirt_hbox, - ] - ) - - methods_accordion = Accordion( - children=[recon_method_box], selected_index=None, titles=("Methods",) - ) - - #### options - - # number of iterations - recon_tomo_metadata["opts"]["num_iter"] = 20 - - def update_num_iter_dict(change): - recon_tomo_metadata["opts"]["num_iter"] = change.new - - number_of_recon_iterations = IntText( - description="Number of Iterations: ", style=extend_description_style, value=20, - ) - number_of_recon_iterations.observe(update_num_iter_dict, names="value") - - # center of rotation - recon_tomo_metadata["opts"]["center"] = 0 - - def update_center_of_ration_dict(change): - recon_tomo_metadata["opts"]["center"] = change.new - - center_of_rotation = IntText( - description="Center of Rotation: ", - style=extend_description_style, - value=recon_tomo_metadata["opts"]["center"], - ) - center_of_rotation.observe(update_center_of_ration_dict, names="value") - - recon_tomo_metadata["opts"]["extra_options"] = None - - def update_extra_options_dict(change): - recon_tomo_metadata["opts"]["extra_options"] = change.new - - extra_options = Text( - description="Extra options: ", - placeholder='{"MinConstraint": 0}', - style=extend_description_style, - ) - extra_options.observe(update_extra_options_dict, names="value") - - 
downsample_hb = HBox( - [downsample_checkbox, downsample_factor_text], - layout=Layout(flex_wrap="wrap", justify_content="space-between"), - ) - - options_accordion = Accordion( - children=[ - HBox( - [ - number_of_recon_iterations, - center_of_rotation, - downsample_checkbox, - downsample_factor_text, - extra_options, - ], - layout=Layout(flex_flow="row wrap", justify_content="space-between"), - ), - ], - selected_index=None, - layout=Layout(width="100%"), - titles=("Options",), - ) - - #### putting it all together - sliders_box = VBox( - [projection_range_x_recon, projection_range_y_recon,], - layout=Layout(width="30%"), - justify_content="center", - align_items="space-between", - ) - - recon_initialization_box = HBox( - [ - radio_description, - radio_alignment, - partial_radio_description, - radio_alignment_fulldataset, - sliders_box, - ], - layout=Layout( - width="auto", - justify_content="center", - align_items="center", - flex="flex-grow", - ), - ) - - recon_dashboard = VBox( - [ - recon_initialization_box, - options_accordion, - methods_accordion, - save_options_accordion, - ] - ) - - return recon_dashboard diff --git a/tomopyui/widgets/archive/multiple_recon.py b/tomopyui/widgets/archive/multiple_recon.py deleted file mode 100644 index 9e4c9ad..0000000 --- a/tomopyui/widgets/archive/multiple_recon.py +++ /dev/null @@ -1,470 +0,0 @@ -import tifffile as tf -from ipywidgets import * -import glob -from .debouncer import debounce -import functools -import json - - -def make_recon_tab(recon_tomo_metadata): - - extend_description_style = {"description_width": "auto"} - fpath = recon_tomo_metadata["fpath"] - fname = recon_tomo_metadata["fname"] - recon_tomo_metadata["opts"] = {} - recon_tomo_metadata["methods"] = {} - recon_tomo_metadata["save_opts"] = {} - - # tomo_number = int(filter(str.isdigit, box_title)) - - radio_recon = RadioButtons( - options=["Yes", "No"], - style=extend_description_style, - layout=Layout(width="20%"), - value="No", - ) - - 
radio_recon_fulldataset = RadioButtons( - options=["Full", "Partial"], - style=extend_description_style, - layout=Layout(width="20%"), - disabled=True, - value="Full", - ) - - projection_range_x_recon = IntRangeSlider( - value=[0, 10], - min=0, - max=10, - step=1, - description="Projection X Range:", - disabled=True, - continuous_update=False, - orientation="horizontal", - readout=True, - readout_format="d", - layout=Layout(width="100%"), - style=extend_description_style, - ) - - projection_range_y_recon = IntRangeSlider( - value=[0, 10], - min=0, - max=10, - step=1, - description="Projection Y Range:", - disabled=True, - continuous_update=False, - orientation="horizontal", - readout=True, - readout_format="d", - layout=Layout(width="100%"), - style=extend_description_style, - ) - - def activate_box(change): - if change.new == 0: - radio_recon_fulldataset.disabled = False - recon_tomo_metadata["reconstruct"] = True - recon_tomo_metadata["opts"] = {} - recon_tomo_metadata["methods"] = {} - recon_tomo_metadata["save_opts"] = {} - save_options_accordion.selected_index = 0 - # recon_tomo_metadata["methods"]["SIRT_CUDA"] = {} - options_accordion.selected_index = 0 - methods_accordion.selected_index = 0 - elif change.new == 1: - radio_recon_fulldataset.disabled = True - projection_range_x_recon.disabled = True - projection_range_y_recon.disabled = True - recon_tomo_metadata["reconstruct"] = False - recon_tomo_metadata.pop("opts") - recon_tomo_metadata.pop("methods") - recon_tomo_metadata.pop("save_opts") - save_options_accordion.selected_index = None - options_accordion.selected_index = None - methods_accordion.selected_index = None - - def set_projection_ranges(sizeY, sizeX): - projection_range_x_recon.max = sizeX - 1 - projection_range_y_recon.max = sizeY - 1 - # projection_range_z_recon.max = sizeZ-1 - projection_range_x_recon.value = [0, sizeX - 1] - projection_range_y_recon.value = [0, sizeY - 1] - # projection_range_z_recon.value = [0, sizeZ-1] - 
recon_tomo_metadata["prj_range_x"] = projection_range_x_recon.value - recon_tomo_metadata["prj_range_y"] = projection_range_y_recon.value - - def load_tif_shape_tag(folder_import=False): - os.chdir(fpath) - tiff_count_in_folder = len(glob.glob1(fpath, "*.tif")) - global sizeY, sizeX - if folder_import: - _tomo = td.TomoData(metadata=recon_tomo_metadata) - size = _tomo.prj_imgs.shape - # sizeZ = size[0] - sizeY = size[1] - sizeX = size[2] - set_projection_ranges(sizeY, sizeX) - - else: - with tf.TiffFile(fname) as tif: - if tiff_count_in_folder > 50: - sizeX = tif.pages[0].tags["ImageWidth"].value - sizeY = tif.pages[0].tags["ImageLength"].value - # sizeZ = tiff_count_in_folder # can maybe use this later - else: - imagesize = tif.pages[0].tags["ImageDescription"] - size = json.loads(imagesize.value)["shape"] - sizeY = size[1] - sizeX = size[2] - set_projection_ranges(sizeY, sizeX) - - def load_npy_shape(): - os.chdir(fpath) - size = np.load(fname, mmap_mode="r").shape - global sizeY, sizeX - sizeY = size[1] - sizeX = size[2] - set_projection_ranges(sizeY, sizeX) - - def activate_full_partial(change): - if change.new == 1: - recon_tomo_metadata["partial"] = True - projection_range_x_recon.disabled = False - projection_range_y_recon.disabled = False - # projection_range_z_recon.disabled = False - if fname != "": - if fname.__contains__(".tif"): - load_tif_shape_tag() - elif recon_tomo_metadata["fname"].__contains__(".npy"): - load_npy_shape() - else: - load_tif_shape_tag(folder_import=True) - elif change.new == 0: - recon_tomo_metadata["partial"] = False - set_projection_ranges(sizeY, sizeX) - projection_range_x_recon.disabled = True - projection_range_y_recon.disabled = True - # projection_range_z_recon.disabled = True - - recon_tomo_metadata["partial"] = False - radio_recon.observe(activate_box, names="index") - radio_recon_fulldataset.observe(activate_full_partial, names="index") - - #### callbacks for projection range sliders - - @debounce(0.2) - def 
projection_range_x_update_dict(change): - recon_tomo_metadata["prj_range_x"] = change.new - - projection_range_x_recon.observe(projection_range_x_update_dict, "value") - - @debounce(0.2) - def projection_range_y_update_dict(change): - recon_tomo_metadata["prj_range_y"] = change.new - - projection_range_y_recon.observe(projection_range_y_update_dict, "value") - - #### downsampling - recon_tomo_metadata["opts"]["downsample"] = False - recon_tomo_metadata["opts"]["downsample_factor"] = 1 - - def downsample_turn_on(change): - if change.new == 1: - recon_tomo_metadata["opts"]["downsample"] = True - recon_tomo_metadata["opts"][ - "downsample_factor" - ] = downsample_factor_text.value - downsample_factor_text.disabled = False - if change.new == 0: - recon_tomo_metadata["opts"]["downsample"] = False - recon_tomo_metadata["opts"]["downsample_factor"] = 1 - downsample_factor_text.value = 1 - downsample_factor_text.disabled = True - - downsample_checkbox = Checkbox(description="Downsample?", value=0) - downsample_checkbox.observe(downsample_turn_on) - - def downsample_factor_update_dict(change): - recon_tomo_metadata["opts"]["downsample_factor"] = change.new - - downsample_factor_text = BoundedFloatText( - value=1, - min=0.001, - max=1.0, - description="Downsampling factor:", - disabled=True, - style=extend_description_style, - ) - - downsample_factor_text.observe(downsample_factor_update_dict, names="value") - - #### radio descriptions - - radio_description = "Would you like to reconstruct this dataset?" - partial_radio_description = ( - "Would you like to use the full dataset, or a partial dataset?" - ) - radio_description = HTML( - value="

" - + radio_description - + "

" - ) - partial_radio_description = HTML( - value="

" - + partial_radio_description - + "

" - ) - - #### Saving options - - def create_option_dictionary(opt_list): - opt_dictionary = {opt.description: opt.value for opt in opt_list} - return opt_dictionary - - def create_save_dict_on_checkmark(change, opt_list): - recon_tomo_metadata["save_opts"] = create_option_dictionary(opt_list) - - save_opts = ["tomo_before", "recon", "tiff", "npy"] - recon_tomo_metadata["save_opts"] = {key: None for key in save_opts} - - def create_save_checkboxes(opts): - checkboxes = [ - Checkbox(description=opt, style=extend_description_style,) for opt in opts - ] - return checkboxes - - save_checkboxes = create_save_checkboxes(save_opts) - - list( - ( - opt.observe( - functools.partial( - create_save_dict_on_checkmark, opt_list=save_checkboxes, - ), - names=["value"], - ) - for opt in save_checkboxes - ) - ) - - save_hbox = HBox( - save_checkboxes, - layout=Layout(flex_wrap="wrap", justify_content="space-between"), - ) - - save_options_accordion = Accordion( - children=[save_hbox], - selected_index=None, - layout=Layout(width="100%"), - titles=("Save Options",), - ) - - #### Methods checkboxes - - recon_FP_CUDA = Checkbox(description="FP_CUDA") - recon_BP_CUDA = Checkbox(description="BP_CUDA") - recon_FBP_CUDA = Checkbox(description="FBP_CUDA") - ### !!!!!!!! 
sirt cuda has options - maybe make them into a radio chooser - recon_SIRT_CUDA = Checkbox(description="SIRT_CUDA") - recon_SIRT_CUDA_option1 = Checkbox(description="SIRT Plugin-Faster", disabled=False) - recon_SIRT_CUDA_option2 = Checkbox(description="SIRT 3D-Fastest", disabled=False) - recon_SIRT_CUDA_option_list = [ - recon_SIRT_CUDA_option1, - recon_SIRT_CUDA_option2, - ] - recon_SIRT_CUDA_checkboxes = [ - recon_SIRT_CUDA, - recon_SIRT_CUDA_option1, - recon_SIRT_CUDA_option2, - ] - recon_SART_CUDA = Checkbox(description="SART_CUDA") - recon_CGLS_CUDA = Checkbox(description="CGLS_CUDA") - recon_MLEM_CUDA = Checkbox(description="MLEM_CUDA") - recon_method_list = [ - recon_FP_CUDA, - recon_BP_CUDA, - recon_FBP_CUDA, - recon_SART_CUDA, - recon_CGLS_CUDA, - recon_MLEM_CUDA, - ] - - ####### Toggling on options if you select SIRT. Copy the observe function below - ######## if more options are needed. - - def toggle_on(change, opt_list, dictname): - if change.new == 1: - recon_tomo_metadata["methods"][dictname] = {} - for option in opt_list: - option.disabled = False - if change.new == 0: - recon_tomo_metadata["methods"].pop(dictname) - for option in opt_list: - option.value = 0 - option.disabled = True - - recon_SIRT_CUDA.observe( - functools.partial( - toggle_on, opt_list=recon_SIRT_CUDA_option_list, dictname="SIRT_CUDA" - ), - names=["value"], - ) - - def create_option_dictionary(opt_list): - opt_dictionary = {opt.description: opt.value for opt in opt_list} - return opt_dictionary - - def create_dict_on_checkmark(change, opt_list, dictname): - recon_tomo_metadata["methods"][dictname] = create_option_dictionary(opt_list) - - def create_dict_on_checkmark_no_options(change): - if change.new == True: - recon_tomo_metadata["methods"][change.owner.description] = {} - if change.new == False: - recon_tomo_metadata["methods"].pop(change.owner.description) - - [ - checkbox.observe(create_dict_on_checkmark_no_options) - for checkbox in recon_method_list - ] - # Makes generator 
for mapping of options to observe functions. - # If other options needed for other reconstruction methods, use similar - - list( - ( - opt.observe( - functools.partial( - create_dict_on_checkmark, - opt_list=recon_SIRT_CUDA_option_list, - dictname="SIRT_CUDA", - ), - names=["value"], - ) - for opt in recon_SIRT_CUDA_option_list - ) - ) - - list( - ( - opt.observe( - functools.partial( - create_dict_on_checkmark, - opt_list=recon_SIRT_CUDA_option_list, - dictname="SIRT_CUDA", - ), - names=["value"], - ) - for opt in recon_SIRT_CUDA_option_list - ) - ) - - sirt_hbox = HBox(recon_SIRT_CUDA_checkboxes) - - recon_method_box = VBox( - [ - VBox(recon_method_list, layout=widgets.Layout(flex_flow="row wrap")), - sirt_hbox, - ] - ) - - methods_accordion = Accordion( - children=[recon_method_box], selected_index=None, titles=("Methods",) - ) - - #### options - - # number of iterations - recon_tomo_metadata["opts"]["num_iter"] = 20 - - def update_num_iter_dict(change): - recon_tomo_metadata["opts"]["num_iter"] = change.new - - number_of_recon_iterations = IntText( - description="Number of Iterations: ", style=extend_description_style, value=20, - ) - number_of_recon_iterations.observe(update_num_iter_dict, names="value") - - # center of rotation - recon_tomo_metadata["opts"]["center"] = 0 - - def update_center_of_ration_dict(change): - recon_tomo_metadata["opts"]["center"] = change.new - - center_of_rotation = IntText( - description="Center of Rotation: ", - style=extend_description_style, - value=recon_tomo_metadata["opts"]["center"], - ) - center_of_rotation.observe(update_center_of_ration_dict, names="value") - - recon_tomo_metadata["opts"]["extra_options"] = None - - def update_extra_options_dict(change): - recon_tomo_metadata["opts"]["extra_options"] = change.new - - extra_options = Text( - description="Extra options: ", - placeholder='{"MinConstraint": 0}', - style=extend_description_style, - ) - extra_options.observe(update_extra_options_dict, names="value") - - 
downsample_hb = HBox( - [downsample_checkbox, downsample_factor_text], - layout=Layout(flex_wrap="wrap", justify_content="space-between"), - ) - - options_accordion = Accordion( - children=[ - HBox( - [ - number_of_recon_iterations, - center_of_rotation, - downsample_checkbox, - downsample_factor_text, - extra_options, - ], - layout=Layout(flex_flow="row wrap", justify_content="space-between"), - ), - ], - selected_index=None, - layout=Layout(width="100%"), - titles=("Options",), - ) - - #### putting it all together - sliders_box = VBox( - [projection_range_x_recon, projection_range_y_recon,], - layout=Layout(width="30%"), - justify_content="center", - align_items="space-between", - ) - - recon_initialization_box = HBox( - [ - radio_description, - radio_recon, - partial_radio_description, - radio_recon_fulldataset, - sliders_box, - ], - layout=Layout( - width="auto", - justify_content="center", - align_items="center", - flex="flex-grow", - ), - ) - - recon_dashboard = VBox( - [ - recon_initialization_box, - options_accordion, - methods_accordion, - save_options_accordion, - ] - ) - - return recon_dashboard diff --git a/tomopyui/widgets/archive/plot_imported_data.py b/tomopyui/widgets/archive/plot_imported_data.py deleted file mode 100644 index 980b5f9..0000000 --- a/tomopyui/widgets/archive/plot_imported_data.py +++ /dev/null @@ -1,230 +0,0 @@ -from ipywidgets import * -from matplotlib import animation -from matplotlib import pyplot as plt -from IPython.display import HTML -import plotly.express as px -from skimage.transform import rescale - - -def plot_imported_data(tomodata, widget_linker): - - extend_description_style = {"description_width": "auto"} - plot_output = Output() - movie_output = Output() - - def plot_projections(tomodata, range_x, range_y, range_z, skip, scale_factor): - volume = tomodata.prj_imgs[ - range_z[0] : range_z[1] : skip, - range_y[0] : range_y[1] : 1, - range_x[0] : range_x[1] : 1, - ].copy() - volume_rescaled = rescale( - volume, (1, 
scale_factor, scale_factor), anti_aliasing=False - ) - fig = px.imshow( - volume_rescaled, - facet_col=0, - facet_col_wrap=5, - binary_string=True, - height=2000, - facet_row_spacing=0.001, - ) - rangez = range(range_z[0], range_z[1], skip) - angles = np.around(tomodata.theta * 180 / np.pi, decimals=1) - for i, j in enumerate(rangez): - for k in range(len(rangez)): - if fig.layout.annotations[k]["text"] == "facet_col=" + str(i): - fig.layout.annotations[k]["text"] = ( - "Proj:" + " " + str(j) + "
Angle:" + "" + str(angles[j]) - ) - fig.layout.annotations[k]["y"] = ( - fig.layout.annotations[k]["y"] - 0.02 - ) - break - fig.update_layout( - # margin=dict(autoexpand=False), - font_family="Helvetica", - font_size=30, - # margin=dict(l=5, r=5, t=5, b=5), - paper_bgcolor="LightSteelBlue", - ) - fig.update_xaxes(showticklabels=False).update_yaxes(showticklabels=False) - # fig.for_each_annotation(lambda a: a.update(text='')) - display(fig) - - def plot_projection_movie(tomodata, range_x, range_y, range_z, skip, scale_factor): - - frames = [] - animSliceNos = range(range_z[0], range_z[1], skip) - volume = tomodata.prj_imgs[ - range_z[0] : range_z[1] : skip, - range_y[0] : range_y[1] : 1, - range_x[0] : range_x[1] : 1, - ] - volume_rescaled = rescale( - volume, (1, scale_factor, scale_factor), anti_aliasing=False - ) - fig, ax = plt.subplots(figsize=(10, 10)) - for i in range(len(animSliceNos)): - frames.append([ax.imshow(volume_rescaled[i], cmap="viridis")]) - ani = animation.ArtistAnimation( - fig, frames, interval=50, blit=True, repeat_delay=100 - ) - # plt.close() - display(HTML(ani.to_jshtml())) - - def update_projection_plot_on_click(button_click): - plot_output.clear_output() - with plot_output: - update_plot_button.button_style = "info" - update_plot_button.icon = "fas fa-cog fa-spin fa-lg" - update_plot_button.description = "Making a plot." - plot_projections( - tomodata, - projection_range_x.value, - projection_range_y.value, - projection_range_theta.value, - skip_theta.value, - 0.1, - ) - update_plot_button.button_style = "success" - update_plot_button.icon = "square-check" - update_plot_button.description = "Do it again?" - - def create_projection_movie_on_click(button_click): - movie_output.clear_output() - with movie_output: - create_movie_button.button_style = "info" - create_movie_button.icon = "fas fa-cog fa-spin fa-lg" - create_movie_button.description = "Making a movie." 
- plot_projection_movie( - tomodata, - projection_range_x.value, - projection_range_y.value, - projection_range_theta.value, - skip_theta.value, - 0.1, - ) - create_movie_button.button_style = "success" - create_movie_button.icon = "square-check" - create_movie_button.description = "Do it again?" - - projection_range_x = IntRangeSlider( - value=[0, tomodata.prj_imgs.shape[2] - 1], - min=0, - max=tomodata.prj_imgs.shape[2] - 1, - step=1, - description="Projection X Range:", - disabled=False, - continuous_update=False, - orientation="horizontal", - readout=True, - readout_format="d", - layout=Layout(width="70%"), - style=extend_description_style, - ) - - projection_range_y = IntRangeSlider( - value=[0, tomodata.prj_imgs.shape[1] - 1], - min=0, - max=tomodata.prj_imgs.shape[1] - 1, - step=1, - description="Projection Y Range:", - disabled=False, - continuous_update=False, - orientation="horizontal", - readout=True, - readout_format="d", - layout=Layout(width="70%"), - style=extend_description_style, - ) - - projection_range_theta = IntRangeSlider( - value=[0, tomodata.prj_imgs.shape[0] - 1], - min=0, - max=tomodata.prj_imgs.shape[0] - 1, - step=1, - description="Projection Z Range:", - disabled=False, - continuous_update=False, - orientation="horizontal", - readout=True, - readout_format="d", - layout=Layout(width="70%"), - style=extend_description_style, - ) - - skip_theta = IntSlider( - value=20, - min=1, - max=50, - step=1, - description="Skipped range in z:", - disabled=False, - continuous_update=False, - orientation="horizontal", - readout=True, - readout_format="d", - layout=Layout(width="70%"), - style=extend_description_style, - ) - - create_movie_button = Button( - description="Click me to create a movie", layout=Layout(width="auto") - ) - create_movie_button.on_click(create_projection_movie_on_click) - update_plot_button = Button( - description="Click me to create a plot", layout=Layout(width="auto") - ) - 
update_plot_button.on_click(update_projection_plot_on_click) - - movie_output.layout = Layout(width="100%", height="100%", align_items="center") - plot_output.layout = Layout(width="100%", height="100%", align_items="center") - plot_box_layout = Layout( - border="3px solid blue", - width="100%", - height="auto", - align_items="center", - justify_content="center", - ) - grid_plot = GridBox( - children=[update_plot_button, plot_output], - layout=Layout( - width="100%", - grid_template_rows="auto", - grid_template_columns="15% 84%", - grid_template_areas=""" - "update_plot_button plot_output" - """, - ), - ) - grid_movie = GridBox( - children=[create_movie_button, movie_output], - layout=Layout( - width="100%", - grid_template_rows="auto", - grid_template_columns="15% 84%", - grid_template_areas=""" - "create_movie_button movie_output" - """, - ), - ) - - plot_vbox = VBox( - [ - projection_range_x, - projection_range_y, - projection_range_theta, - skip_theta, - grid_movie, - grid_plot, - ], - layout=plot_box_layout, - ) - - widget_linker["projection_range_x_movie"] = projection_range_x - widget_linker["projection_range_y_movie"] = projection_range_y - widget_linker["projection_range_theta_movie"] = projection_range_theta - widget_linker["skip_theta_movie"] = skip_theta - - return plot_vbox diff --git a/tomopyui/widgets/archive/plotly_2d_plot_example.py b/tomopyui/widgets/archive/plotly_2d_plot_example.py deleted file mode 100644 index 9a5ed33..0000000 --- a/tomopyui/widgets/archive/plotly_2d_plot_example.py +++ /dev/null @@ -1,87 +0,0 @@ -# From -import pandas as pd -import plotly.express as px - - -def handle_elem_change(change): - with rangewidg.hold_trait_notifications(): # This is because if you do't put it it set max, - - rangewidg.max = big_grid[ - dropm_elem.value - ].max() # and if max is < min he freaks out. Like this he first - rangewidg.min = big_grid[ - dropm_elem.value - ].min() # set everything and then send the eventual errors notification. 
- rangewidg.value = [ - big_grid[dropm_elem.value].min(), - big_grid[dropm_elem.value].max(), - ] - - -def plot_change(change): - df = big_grid[big_grid["id_col"].isin(dropm_id.value)] - output.clear_output(wait=True) - with output: - fig = px.scatter( - df, - x="coord1", - y="coord2", - color=dropm_elem.value, - hover_data=["info"], - width=500, - height=800, - color_continuous_scale="Turbo", - range_color=rangewidg.value, - ) - fig.show() - - -# define the widgets dropm_elem and rangewidg, which are the possible df.columns and the color range -# used in the function plot. -big_grid = pd.DataFrame( - data=dict( - id_col=[1, 2, 3, 4, 5], - col1=[0.1, 0.2, 0.3, 0.4, 0.5], - col2=[10, 20, 30, 40, 50], - coord1=[6, 7, 8, 9, 10], - coord2=[6, 7, 8, 9, 10], - info=["info1", "info2", "info3", "info4", "info5",], - ) -) -list_elem = ["col1", "col2", "info"] -list_id = big_grid.id_col.values - - -dropm_elem = widgets.Dropdown( - options=list_elem -) # creates a widget dropdown with all the _ppms -dropm_id = widgets.SelectMultiple( - options=list_id, description="Active Jobs", disabled=False -) - -rangewidg = widgets.FloatRangeSlider( - value=[big_grid[dropm_elem.value].min(), big_grid[dropm_elem.value].max()], - min=big_grid[dropm_elem.value].min(), - max=big_grid[dropm_elem.value].max(), - step=0.001, - readout_format=".3f", - description="Color Scale Range", - continuous_update=False, -) -output = widgets.Output() -# this line is crucial, it basically says: Whenever you move the dropdown menu widget, call the function -# #handle_elem_change, which will in turn update the values of rangewidg -dropm_elem.observe(handle_elem_change, names="value") -dropm_elem.observe(plot_change, names="value") -dropm_id.observe(plot_change, names="value") -rangewidg.observe(plot_change, names="value") - -# # #this line is also crucial, it links the widgets dropmenu and rangewidg with the function plot, assigning -# # #to elem and to rang (parameters of function plot) the values of dropmenu 
and rangewidg - -left_box = widgets.VBox([output]) -right_box = widgets.VBox([dropm_elem, rangewidg, dropm_id]) -tbox = widgets.HBox([left_box, right_box]) -# widgets.interact(plot,elem=dropm_elem,rang=rangewidg) - -display(tbox) diff --git a/tomopyui/widgets/archive/plotly_3d_example.py b/tomopyui/widgets/archive/plotly_3d_example.py deleted file mode 100644 index ef4715c..0000000 --- a/tomopyui/widgets/archive/plotly_3d_example.py +++ /dev/null @@ -1,106 +0,0 @@ -# This is from https://plotly.com/python/visualizing-mri-volume-slices/ - - -import time -import numpy as np - -from skimage import io - -# vol = io.imread("https://s3.amazonaws.com/assets.datacamp.com/blog_assets/attention-mri.tif") -# volume = vol.T -volume = tomo_norm_mlog.prj_imgs[0:100] -volume = tomopy.misc.morph.downsample(vol, level=2, axis=1) -volume = tomopy.misc.morph.downsample(vol, level=2, axis=2) -# volume = tomo_norm_mlog.prj_imgs[0:10] -r, c = volume[0].shape - -# Define frames -import plotly.graph_objects as go - -nb_frames = volume.shape[0] - -fig = go.Figure( - frames=[ - go.Frame( - data=go.Surface( - z=(6.7 - k * 0.1) * np.ones((r, c)), - surfacecolor=np.flipud(volume[9 - k]), - cmin=0, - cmax=1, - ), - name=str( - k - ), # you need to name the frame for the animation to behave properly - ) - for k in range(nb_frames) - ] -) - -# Add data to be displayed before animation starts -fig.add_trace( - go.Surface( - z=6.7 * np.ones((r, c)), - surfacecolor=np.flipud(volume[99]), - colorscale="Gray", - cmin=0, - cmax=1, - colorbar=dict(thickness=20, ticklen=4), - ) -) - - -def frame_args(duration): - return { - "frame": {"duration": duration}, - "mode": "immediate", - "fromcurrent": True, - "transition": {"duration": duration, "easing": "linear"}, - } - - -sliders = [ - { - "pad": {"b": 10, "t": 60}, - "len": 0.9, - "x": 0.1, - "y": 0, - "steps": [ - {"args": [[f.name], frame_args(0)], "label": str(k), "method": "animate",} - for k, f in enumerate(fig.frames) - ], - } -] - -# Layout 
-fig.update_layout( - title="Slices in volumetric data", - width=600, - height=600, - scene=dict( - zaxis=dict(range=[-0.1, 6.8], autorange=False), aspectratio=dict(x=1, y=1, z=1), - ), - updatemenus=[ - { - "buttons": [ - { - "args": [None, frame_args(50)], - "label": "▶", # play symbol - "method": "animate", - }, - { - "args": [[None], frame_args(0)], - "label": "◼", # pause symbol - "method": "animate", - }, - ], - "direction": "left", - "pad": {"r": 10, "t": 70}, - "type": "buttons", - "x": 0.1, - "y": 0, - } - ], - sliders=sliders, -) - -fig.show() diff --git a/tomopyui/widgets/archive/recon_box.py b/tomopyui/widgets/archive/recon_box.py deleted file mode 100644 index 8dadad2..0000000 --- a/tomopyui/widgets/archive/recon_box.py +++ /dev/null @@ -1,462 +0,0 @@ -from ipywidgets import * -import functools -import tomopy.data.tomodata as td -from .plot_aligned_data import plot_aligned_data - -# TODO: This is very disorganized. Try to bring some order/organization. - - -def recon_dashboard( - reconmetadata, - generalmetadata, - importmetadata, - alignmentmetadata, - alignmentdata, - aligned_tomo_list, - widget_linker, -): - plot_vbox, recon_files = plot_aligned_data( - reconmetadata, - alignmentmetadata, - importmetadata, - generalmetadata, - alignmentdata, - widget_linker, - ) - - main_logger = generalmetadata["main_logger"] - main_handler = generalmetadata["main_handler"] - - # projection_range_x_movie = widget_linker["projection_range_x_movie"] - # projection_range_y_movie = widget_linker["projection_range_y_movie"] - # projection_range_theta_movie = widget_linker["projection_range_theta_movie"] - # skip_theta_movie = widget_linker["skip_theta_movie"] - - extend_description_style = {"description_width": "auto"} - - ############ Perform recon? Y/N button - radio_recon = RadioButtons( - options=["Yes", "No"], - style=extend_description_style, - layout=Layout(width="20%"), - value="No", - ) - ############ Full dataset? 
Full/partial radio - radio_recon_fulldataset = RadioButtons( - options=["Full", "Partial"], - style=extend_description_style, - layout=Layout(width="20%"), - disabled=True, - value="Full", - ) - - ############ If partial, use sliders here - projection_range_x_recon = IntRangeSlider( - value=[0, tomo.prj_imgs.shape[2] - 1], - min=0, - max=tomo.prj_imgs.shape[2] - 1, - step=1, - description="Projection X Range:", - disabled=True, - continuous_update=False, - orientation="horizontal", - readout=True, - readout_format="d", - layout=Layout(width="100%"), - style=extend_description_style, - ) - - projection_range_y_recon = IntRangeSlider( - value=[0, tomo.prj_imgs.shape[1] - 1], - min=0, - max=tomo.prj_imgs.shape[1] - 1, - step=1, - description="Projection Y Range:", - disabled=True, - continuous_update=False, - orientation="horizontal", - readout=True, - readout_format="d", - layout=Layout(width="100%"), - style=extend_description_style, - ) - - load_range_from_above = Button( - description="Click to load projection range from above.", - disabled=True, - button_style="info", - tooltip="Make sure to choose all of the buttons above before clicking this button", - icon="", - layout=Layout(width="95%", justify_content="center"), - ) - - number_of_recon_iterations = IntText( - description="Number of Iterations: ", style=extend_description_style, value=20, - ) - center_of_rotation = IntText( - description="Center of Rotation: ", - style=extend_description_style, - value=tomo.prj_imgs.shape[2] / 2, - ) - - extra_options = Text( - description="Extra options: ", - placeholder='{"MinConstraint": 0}', - style=extend_description_style, - ) - - recon_start_button = Button( - description="After choosing all of the options above, click this button to start the reconstruction.", - disabled=True, - button_style="info", - tooltip="Make sure to choose all of the buttons above before clicking this button", - icon="", - layout=Layout(width="auto", justify_content="center"), - ) - - # enable 
the alignment gui if on. - def radio_recon_true(change): - if change.new == 0: - radio_recon_fulldataset.disabled = False - recon_start_button.disabled = False - reconmetadata["recondata"] = True - reconmetadata["methods"]["SIRT_CUDA"] = {} - other_options_accordion.selected_index = 0 - methods_accordion.selected_index = 0 - save_options_accordion.selected_index = 0 - - elif change.new == 1: - radio_recon_fulldataset.disabled = True - recon_start_button.disabled = True - projection_range_x_recon.disabled = True - projection_range_y_recon.disabled = True - load_range_from_above.disabled = True - reconmetadata["recondata"] = False - other_options_accordion.selected_index = None - methods_accordion.selected_index = None - save_options_accordion.selected_index = None - - ####!!!!!!!!!!!!!! Fix plotting widget link - def radio_recon_full_partial(change): - if change.new == 1: - projection_range_x_recon.disabled = False - projection_range_y_recon.disabled = False - load_range_from_above.disabled = False - reconmetadata["recondata"] = True - load_range_from_above.description = ( - "Click to load projection range from plotting screen." - ) - load_range_from_above.icon = "" - elif change.new == 0: - if "range_y_link" in locals() or "range_y_link" in globals(): - range_y_link.unlink() - range_x_link.unlink() - load_range_from_above.button_style = "info" - load_range_from_above.description = ( - "Unlinked ranges. Enable partial range to link again." 
- ) - load_range_from_above.icon = "unlink" - projection_range_x_recon.value = [0, tomo.prj_imgs.shape[2] - 1] - projection_range_x_recon.disabled = True - projection_range_y_recon.value = [0, tomo.prj_imgs.shape[1] - 1] - projection_range_y_recon.disabled = True - load_range_from_above.disabled = True - meta["aligndata"] = False - - def load_range_from_above_onclick(self): - if self.button_style == "info": - global range_y_link, range_x_link - range_y_link = link( - (projection_range_y_movie, "value"), - (projection_range_y_recon, "value"), - ) - range_x_link = link( - (projection_range_x_movie, "value"), - (projection_range_x_recon, "value"), - ) - self.button_style = "success" - self.description = "Linked ranges. Click again to unlink." - self.icon = "link" - elif self.button_style == "success": - range_y_link.unlink() - range_x_link.unlink() - projection_range_x_recon.value = [0, tomo.prj_imgs.shape[2] - 1] - projection_range_y_recon.value = [0, tomo.prj_imgs.shape[1] - 1] - self.button_style = "info" - self.description = "Unlinked ranges. Click again to link." - self.icon = "unlink" - - method_output = Output() - output0 = Output() - - #################### START Recon ####################### - def set_options_and_run_align(self): - self.icon = "fas fa-cog fa-spin fa-lg" - self.description = ( - "Setting options and loading data into memory for reconstruction." - ) - reconmetadata["opts"]["num_iter"] = number_of_recon_iterations.value - reconmetadata["opts"]["center"] = center_of_rotation.value - reconmetadata["opts"]["prj_range_x"] = projection_range_x_recon.value - reconmetadata["opts"]["prj_range_y"] = projection_range_y_recon.value - reconmetadata["opts"]["extra_options"] = extra_options.value - #!!!!!!!!! what do these call backs do in recon. 
- reconmetadata["callbacks"]["button"] = self - reconmetadata["callbacks"]["methodoutput"] = method_output - reconmetadata["callbacks"]["output0"] = output0 - reconmetadata["opts"]["downsample"] = downsample_checkbox.value - reconmetadata["opts"]["downsample_factor"] = downsample_factor_text.value - if len(reconmetadata["methods"]) > 1: - reconmetadata["reconmultiple"] = True - try: - self.description = "Reconstructing your data." - ##### reconstruction function goes here. - # aligned_tomo_list.append(TomoAlign(tomo, reconmetadata)) - self.button_style = "success" - self.icon = "fa-check-square" - self.description = "Finished reconstruction." - except: - self.button_style = "warning" - self.icon = "exclamation-triangle" - self.description = "Something went wrong." - - ############################# METHOD CHOOSER BOX ############################ - recon_FP_CUDA = Checkbox(description="FP_CUDA") - recon_BP_CUDA = Checkbox(description="BP_CUDA") - recon_FBP_CUDA = Checkbox(description="FBP_CUDA") - ### !!!!!!!! sirt cuda has options - maybe make them into a radio chooser - recon_SIRT_CUDA = Checkbox(description="SIRT_CUDA") - recon_SIRT_CUDA_option1 = Checkbox(description="SIRT Plugin-Faster", disabled=False) - recon_SIRT_CUDA_option2 = Checkbox(description="SIRT 3D-Fastest", disabled=False) - recon_SIRT_CUDA_checkboxes = [ - recon_SIRT_CUDA, - recon_SIRT_CUDA_option1, - recon_SIRT_CUDA_option2, - ] - recon_SART_CUDA = Checkbox(description="SART_CUDA") - recon_CGLS_CUDA = Checkbox(description="CGLS_CUDA") - recon_MLEM_CUDA = Checkbox(description="MLEM_CUDA") - recon_method_list = [ - recon_FP_CUDA, - recon_BP_CUDA, - recon_FBP_CUDA, - recon_SART_CUDA, - recon_CGLS_CUDA, - recon_MLEM_CUDA, - ] - - ####### Toggling on options if you select SIRT. Copy the observe function below - ######## if more options are needed. 
- - def toggle_on(change, opt_list, dictname): - if change.new == 1: - reconmetadata["methods"][dictname] = {} - for option in opt_list: - option.disabled = False - if change.new == 0: - reconmetadata["methods"].pop(dictname) - for option in opt_list: - option.value = 0 - option.disabled = True - - recon_SIRT_CUDA.observe( - functools.partial( - toggle_on, opt_list=recon_SIRT_CUDA_option_list, dictname="SIRT_CUDA" - ), - names=["value"], - ) - - def create_option_dictionary(opt_list): - opt_dictionary = {opt.description: opt.value for opt in opt_list} - return opt_dictionary - - def create_dict_on_checkmark(change, opt_list, dictname): - reconmetadata["methods"][dictname] = create_option_dictionary(opt_list) - - # Makes generator for mapping of options to observe functions. - # If other options needed for other reconstruction methods, use similar - - list( - ( - opt.observe( - functools.partial( - create_dict_on_checkmark, - opt_list=recon_SIRT_CUDA_option_list, - dictname="SIRT_CUDA", - ), - names=["value"], - ) - for opt in recon_SIRT_CUDA_option_list - ) - ) - - sirt_hbox = HBox([recon_SIRT_CUDA]) - - recon_method_box = VBox( - [ - VBox(recon_method_list, layout=widgets.Layout(flex_flow="row wrap")), - sirt_hbox, - ] - ) - - ##############################Alignment start button???####################### - radio_align.observe(radio_recon_true, names="index") - radio_recon_fulldataset.observe(radio_recon_full_partial, names="index") - load_range_from_above.on_click(load_range_from_above_onclick) - recon_start_button.on_click(set_options_and_run_align) - - #######################DOWNSAMPLE CHECKBOX############################ - def downsample_turn_on(change): - if change.new == 1: - reconmetadata["opts"]["downsample"] = True - downsample_factor_text.disabled = False - if change.new == 0: - reconmetadata["opts"]["downsample"] = False - downsample_factor_text.disabled = True - - downsample_checkbox = Checkbox(description="Downsample?", value=0) - 
reconmetadata["opts"]["downsample"] = False - downsample_checkbox.observe(downsample_turn_on) - - downsample_factor_text = BoundedFloatText( - value=0.5, min=0.001, max=1.0, description="Downsampling factor:", disabled=True - ) - - ######################SAVING OPTIONS######################## - - def create_option_dictionary(opt_list): - opt_dictionary = {opt.description: opt.value for opt in opt_list} - return opt_dictionary - - def create_save_dict_on_checkmark(change, opt_list): - reconmetadata["save_opts"] = create_option_dictionary(opt_list) - - save_opts = ["tomo_before", "recon", "tiff", "npy"] - - def create_save_checkboxes(opts): - checkboxes = [ - Checkbox(description=opt, style=extend_description_style,) for opt in opts - ] - return checkboxes - - save_checkboxes = create_save_checkboxes(save_opts) - - list( - ( - opt.observe( - functools.partial( - create_save_dict_on_checkmark, opt_list=save_checkboxes, - ), - names=["value"], - ) - for opt in save_checkboxes - ) - ) - - save_hbox = HBox( - save_checkboxes, - layout=Layout(flex_wrap="wrap", justify_content="space-between"), - ) - - save_options_accordion = Accordion( - children=[save_hbox], - selected_index=None, - layout=Layout(width="100%"), - titles=("Save Options",), - ) - - #################### ALIGNMENT BOX ORGANIZATION ######################## - radio_description = "Reconstruct your data?" - partial_radio_description = ( - "Would you like to use the full dataset, or a partial dataset?" - ) - radio_description = HTML( - value="

" - + radio_description - + "

" - ) - partial_radio_description = HTML( - value="

" - + partial_radio_description - + "

" - ) - - pixel_range_slider_vb = VBox( - [ - HBox( - [load_range_from_above], - justify_content="center", - align_content="center", - ), - projection_range_x_recon, - projection_range_y_recon, - ], - layout=Layout(width="30%"), - justify_content="center", - align_items="space-between", - ) - - hb1 = HBox( - [ - radio_description, - radio_align, - partial_radio_description, - radio_recon_fulldataset, - pixel_range_slider_vb, - ], - layout=Layout( - width="auto", - justify_content="center", - align_items="center", - flex="flex-grow", - ), - ) - hb2 = HBox( - [recon_start_button], layout=Layout(width="auto", justify_content="center") - ) - methods_accordion = Accordion( - children=[recon_method_box], selected_index=None, titles=("Methods",) - ) - - other_options_accordion = Accordion( - children=[ - VBox( - [ - HBox( - [ - number_of_recon_iterations, - center_of_rotation, - upsample_factor, - ], - layout=Layout( - flex_wrap="wrap", justify_content="space-between" - ), - ), - HBox( - [downsample_checkbox, downsample_factor_text,], - layout=Layout( - flex_wrap="wrap", justify_content="space-between" - ), - ), - extra_options, - ], - layout=Layout(width="100%", height="100%"), - ) - ], - selected_index=None, - layout=Layout(width="100%"), - titles=("Other options",), - ) - - recon_dashboard = VBox( - children=[ - hb1, - methods_accordion, - save_options_accordion, - other_options_accordion, - hb2, - method_output, - ] - ) - - return recon_dashboard, plot_vbox, recon_files diff --git a/tomopyui/widgets/archive/recon_dashboard.py b/tomopyui/widgets/archive/recon_dashboard.py deleted file mode 100644 index 2dd065d..0000000 --- a/tomopyui/widgets/archive/recon_dashboard.py +++ /dev/null @@ -1,99 +0,0 @@ -from ipywidgets import * -import functools -import tomopy.data.tomodata as td -from .plot_aligned_data import plot_aligned_data -from .multiple_recon import make_recon_tab - - -def make_recon_dashboard( - reconmetadata, - generalmetadata, - importmetadata, - 
alignmentmetadata, - alignmentdata, - widget_linker, -): - plot_vbox, recon_files = plot_aligned_data( - reconmetadata, - alignmentmetadata, - importmetadata, - generalmetadata, - alignmentdata, - widget_linker, - ) - - main_logger = generalmetadata["main_logger"] - main_handler = generalmetadata["main_handler"] - - # Adding recon tabs - extend_description_style = {"description_width": "auto"} - recon_tabs = [] - recon_tabs_titles = [] - - def make_recon_tabs(self): - self.icon = "fas fa-cog fa-spin fa-lg" - self.button_style = "info" - recon_tabs = [] - recon_tabs_titles = [] - for key in reconmetadata["tomo"]: - if "fpath" in reconmetadata["tomo"][key]: - recon_tabs.append(make_recon_tab(reconmetadata["tomo"][key])) - if "fname" in reconmetadata["tomo"][key]: - recon_tabs_titles.append(reconmetadata["tomo"][key]["fname"]) - else: - recon_tabs_titles.append(reconmetadata["tomo"][key]["fpath"]) - recon_tabs.children = recon_dashboard - recon_tabs.titles = recon_dashboard_titles - self.icon = "fa-check-square" - self.button_style = "success" - self.description = "Make your edits to the reconstruction options below. Click this again to upload more data." - recon_tab_vbox.children = recon_tab_vbox.children + (start_recon_button,) - - make_recon_tabs_button = Button( - description="Press this button after you finish uploading.", - disabled=False, - button_style="info", - tooltip="", - icon="", - layout=Layout(width="auto", justify_content="center"), - ) - - make_recon_tabs_button.on_click(make_recon_tabs) - - method_output = Output() - output0 = Output() - - #################### START Recon ####################### - def set_options_and_run_align(self): - self.icon = "fas fa-cog fa-spin fa-lg" - self.description = ( - "Setting options and loading data into memory for reconstruction." - ) - - try: - self.description = "Reconstructing your data." 
- # for i in len() - self.button_style = "success" - self.icon = "fa-check-square" - self.description = "Finished reconstruction." - except: - self.button_style = "warning" - self.icon = "exclamation-triangle" - self.description = "Something went wrong." - - start_recon_button = Button( - description="After finishing your edits above, click here to start reconstruction.", - disabled=False, - button_style="info", - tooltip="", - icon="", - layout=Layout(width="auto", justify_content="center"), - ) - recon_tab = Tab() - recon_tab_vbox = VBox([make_recon_tabs_button, recon_tab]) - recon_dashboard_tabs = [recon_files, plot_vbox, recon_tab_vbox, main_handler.out] - recon_dashboard_titles = ["Upload", "Plot", "Reconstruction", "Log"] - recon_dashboard = Tab(titles=recon_dashboard_titles) - recon_dashboard.children = recon_dashboard_tabs - - return recon_dashboard diff --git a/tomopyui/widgets/archive/recon_options_grid.py b/tomopyui/widgets/archive/recon_options_grid.py deleted file mode 100644 index bebd5ed..0000000 --- a/tomopyui/widgets/archive/recon_options_grid.py +++ /dev/null @@ -1,214 +0,0 @@ -from ipywidgets import * -import functools - -recon_grid = GridspecLayout(7, 4) - -FP_CUDA = Checkbox(description="FP_CUDA") -FP_CUDA_option1 = Checkbox(description="option1", disabled=True) -FP_CUDA_option2 = Checkbox(description="option2", disabled=True) -FP_CUDA_option3 = Checkbox(description="option3", disabled=True) -FP_CUDA_option_list = [FP_CUDA_option1, FP_CUDA_option2, FP_CUDA_option3] - -BP_CUDA = Checkbox(description="BP_CUDA") -BP_CUDA_option1 = Checkbox(description="option1", disabled=True) -BP_CUDA_option2 = Checkbox(description="option2", disabled=True) -BP_CUDA_option3 = Checkbox(description="option3", disabled=True) -BP_CUDA_option_list = [BP_CUDA_option1, BP_CUDA_option2, BP_CUDA_option3] - -FBP_CUDA = Checkbox(description="FBP_CUDA") -FBP_CUDA_option1 = Checkbox(description="option1", disabled=True) -FBP_CUDA_option2 = Checkbox(description="option2", 
disabled=True) -FBP_CUDA_option3 = Checkbox(description="option3", disabled=True) -FBP_CUDA_option_list = [FBP_CUDA_option1, FBP_CUDA_option2, FBP_CUDA_option3] - -SIRT_CUDA = Checkbox(description="SIRT_CUDA") -SIRT_CUDA_option1 = Checkbox(description="option1", disabled=True) -SIRT_CUDA_option2 = Checkbox(description="option2", disabled=True) -SIRT_CUDA_option3 = Checkbox(description="option3", disabled=True) -SIRT_CUDA_option_list = [SIRT_CUDA_option1, SIRT_CUDA_option2, SIRT_CUDA_option3] - -SART_CUDA = Checkbox(description="SART_CUDA") -SART_CUDA_option1 = Checkbox(description="option1", disabled=True) -SART_CUDA_option2 = Checkbox(description="option2", disabled=True) -SART_CUDA_option3 = Checkbox(description="option3", disabled=True) -SART_CUDA_option_list = [SART_CUDA_option1, SART_CUDA_option2, SART_CUDA_option3] -CGLS_CUDA = Checkbox(description="CGLS_CUDA") -CGLS_CUDA_option1 = Checkbox(description="option1", disabled=True) -CGLS_CUDA_option2 = Checkbox(description="option2", disabled=True) -CGLS_CUDA_option3 = Checkbox(description="option3", disabled=True) -CGLS_CUDA_option_list = [CGLS_CUDA_option1, CGLS_CUDA_option2, CGLS_CUDA_option3] - -MLEM_CUDA = Checkbox(description="MLEM_CUDA") -MLEM_CUDA_option1 = Checkbox(description="option1", disabled=True) -MLEM_CUDA_option2 = Checkbox(description="option2", disabled=True) -MLEM_CUDA_option3 = Checkbox(description="option3", disabled=True) -MLEM_CUDA_option_list = [MLEM_CUDA_option1, MLEM_CUDA_option2, MLEM_CUDA_option3] - -method_list = [FP_CUDA, BP_CUDA, FBP_CUDA, SIRT_CUDA, SART_CUDA, CGLS_CUDA, MLEM_CUDA] - - -def toggle_on(change, opt_list, dictname): - if change.new == 1: - reconmetadata[dictname] = {} - for option in opt_list: - option.disabled = False - if change.new == 0: - reconmetadata.pop(dictname) - for option in opt_list: - option.value = 0 - option.disabled = True - - -FP_CUDA.observe( - functools.partial(toggle_on, opt_list=FP_CUDA_option_list, dictname="FP_CUDA"), - names=["value"], -) 
-BP_CUDA.observe( - functools.partial(toggle_on, opt_list=BP_CUDA_option_list, dictname="BP_CUDA"), - names=["value"], -) -FBP_CUDA.observe( - functools.partial(toggle_on, opt_list=FBP_CUDA_option_list, dictname="FBP_CUDA"), - names=["value"], -) -SIRT_CUDA.observe( - functools.partial(toggle_on, opt_list=SIRT_CUDA_option_list, dictname="SIRT_CUDA"), - names=["value"], -) -SART_CUDA.observe( - functools.partial(toggle_on, opt_list=SART_CUDA_option_list, dictname="SART_CUDA"), - names=["value"], -) -CGLS_CUDA.observe( - functools.partial(toggle_on, opt_list=CGLS_CUDA_option_list, dictname="CGLS_CUDA"), - names=["value"], -) -MLEM_CUDA.observe( - functools.partial(toggle_on, opt_list=MLEM_CUDA_option_list, dictname="MLEM_CUDA"), - names=["value"], -) - - -def create_option_dictionary(opt_list): - opt_dictionary = {opt.description: opt.value for opt in opt_list} - return opt_dictionary - - -def create_dict_on_checkmark(change, opt_list, dictname): - reconmetadata[dictname] = create_option_dictionary(opt_list) - - -# Makes generator for mapping of options to observe functions. Allows for the check boxes to be clicked, and sends the results to -# the reconmetadata dictionary. 
-list( - ( - opt.observe( - functools.partial( - create_dict_on_checkmark, - opt_list=FP_CUDA_option_list, - dictname="FP_CUDA", - ), - names=["value"], - ) - for opt in FP_CUDA_option_list - ) -) -list( - ( - opt.observe( - functools.partial( - create_dict_on_checkmark, - opt_list=BP_CUDA_option_list, - dictname="BP_CUDA", - ), - names=["value"], - ) - for opt in BP_CUDA_option_list - ) -) -list( - ( - opt.observe( - functools.partial( - create_dict_on_checkmark, - opt_list=FBP_CUDA_option_list, - dictname="FBP_CUDA", - ), - names=["value"], - ) - for opt in FBP_CUDA_option_list - ) -) -list( - ( - opt.observe( - functools.partial( - create_dict_on_checkmark, - opt_list=SIRT_CUDA_option_list, - dictname="SIRT_CUDA", - ), - names=["value"], - ) - for opt in SIRT_CUDA_option_list - ) -) -list( - ( - opt.observe( - functools.partial( - create_dict_on_checkmark, - opt_list=SART_CUDA_option_list, - dictname="SART_CUDA", - ), - names=["value"], - ) - for opt in SART_CUDA_option_list - ) -) -list( - ( - opt.observe( - functools.partial( - create_dict_on_checkmark, - opt_list=CGLS_CUDA_option_list, - dictname="CGLS_CUDA", - ), - names=["value"], - ) - for opt in CGLS_CUDA_option_list - ) -) -list( - ( - opt.observe( - functools.partial( - create_dict_on_checkmark, - opt_list=MLEM_CUDA_option_list, - dictname="MLEM_CUDA", - ), - names=["value"], - ) - for opt in MLEM_CUDA_option_list - ) -) - - -def fill_grid(method, opt_list, linenumber, grid): - grid[linenumber, 0] = method - i = 1 - for option in opt_list: - grid[linenumber, i] = option - i += 1 - - -fill_grid(FP_CUDA, FP_CUDA_option_list, 0, recon_grid) -fill_grid(BP_CUDA, BP_CUDA_option_list, 1, recon_grid) -fill_grid(FBP_CUDA, FBP_CUDA_option_list, 2, recon_grid) -fill_grid(SIRT_CUDA, SIRT_CUDA_option_list, 3, recon_grid) -fill_grid(SART_CUDA, SART_CUDA_option_list, 4, recon_grid) -fill_grid(CGLS_CUDA, CGLS_CUDA_option_list, 5, recon_grid) -fill_grid(MLEM_CUDA, MLEM_CUDA_option_list, 6, recon_grid) - -# 
FP_CUDA.observe(functools.partial(toggle_on, opt_list=FP_CUDA_option_list), names=['value']) - -return recon_grid diff --git a/tomopyui/widgets/archive/recon_tab.py b/tomopyui/widgets/archive/recon_tab.py deleted file mode 100644 index b354b2b..0000000 --- a/tomopyui/widgets/archive/recon_tab.py +++ /dev/null @@ -1,470 +0,0 @@ -import tifffile as tf -from ipywidgets import * -import glob -from .debouncer import debounce -import functools -import json - - -def make_recon_tab(recon_tomo_metadata): - - extend_description_style = {"description_width": "auto"} - fpath = recon_tomo_metadata["fpath"] - fname = recon_tomo_metadata["fname"] - recon_tomo_metadata["opts"] = {} - recon_tomo_metadata["methods"] = {} - recon_tomo_metadata["save_opts"] = {} - - # tomo_number = int(filter(str.isdigit, box_title)) - - radio_recon = RadioButtons( - options=["Yes", "No"], - style=extend_description_style, - layout=Layout(width="20%"), - value="No", - ) - - radio_recon_fulldataset = RadioButtons( - options=["Full", "Partial"], - style=extend_description_style, - layout=Layout(width="20%"), - disabled=True, - value="Full", - ) - - projection_range_x_recon = IntRangeSlider( - value=[0, 10], - min=0, - max=10, - step=1, - description="Projection X Range:", - disabled=True, - continuous_update=False, - orientation="horizontal", - readout=True, - readout_format="d", - layout=Layout(width="100%"), - style=extend_description_style, - ) - - projection_range_y_recon = IntRangeSlider( - value=[0, 10], - min=0, - max=10, - step=1, - description="Projection Y Range:", - disabled=True, - continuous_update=False, - orientation="horizontal", - readout=True, - readout_format="d", - layout=Layout(width="100%"), - style=extend_description_style, - ) - - def activate_box(change): - if change.new == 0: - radio_recon_fulldataset.disabled = False - recon_tomo_metadata["reconstruct"] = True - recon_tomo_metadata["opts"] = {} - recon_tomo_metadata["methods"] = {} - recon_tomo_metadata["save_opts"] = {} - 
save_options_accordion.selected_index = 0 - # recon_tomo_metadata["methods"]["SIRT_CUDA"] = {} - options_accordion.selected_index = 0 - methods_accordion.selected_index = 0 - elif change.new == 1: - radio_recon_fulldataset.disabled = True - projection_range_x_recon.disabled = True - projection_range_y_recon.disabled = True - recon_tomo_metadata["reconstruct"] = False - recon_tomo_metadata.pop("opts") - recon_tomo_metadata.pop("methods") - recon_tomo_metadata.pop("save_opts") - save_options_accordion.selected_index = None - options_accordion.selected_index = None - methods_accordion.selected_index = None - - def set_projection_ranges(sizeY, sizeX): - projection_range_x_recon.max = sizeX - 1 - projection_range_y_recon.max = sizeY - 1 - # projection_range_z_recon.max = sizeZ-1 - projection_range_x_recon.value = [0, sizeX - 1] - projection_range_y_recon.value = [0, sizeY - 1] - # projection_range_z_recon.value = [0, sizeZ-1] - recon_tomo_metadata["prj_range_x"] = projection_range_x_recon.value - recon_tomo_metadata["prj_range_y"] = projection_range_y_recon.value - - def load_tif_shape_tag(folder_import=False): - os.chdir(fpath) - tiff_count_in_folder = len(glob.glob1(fpath, "*.tif")) - global sizeY, sizeX - if folder_import: - _tomo = td.TomoData(metadata=recon_tomo_metadata) - size = _tomo.prj_imgs.shape - # sizeZ = size[0] - sizeY = size[1] - sizeX = size[2] - set_projection_ranges(sizeY, sizeX) - - else: - with tf.TiffFile(fname) as tif: - if tiff_count_in_folder > 50: - sizeX = tif.pages[0].tags["ImageWidth"].value - sizeY = tif.pages[0].tags["ImageLength"].value - # sizeZ = tiff_count_in_folder # can maybe use this later - else: - imagesize = tif.pages[0].tags["ImageDescription"] - size = json.loads(imagesize.value)["shape"] - sizeY = size[1] - sizeX = size[2] - set_projection_ranges(sizeY, sizeX) - - def load_npy_shape(): - os.chdir(fpath) - size = np.load(fname, mmap_mode="r").shape - global sizeY, sizeX - sizeY = size[1] - sizeX = size[2] - 
set_projection_ranges(sizeY, sizeX) - - def activate_full_partial(change): - if change.new == 1: - recon_tomo_metadata["partial"] = True - projection_range_x_recon.disabled = False - projection_range_y_recon.disabled = False - # projection_range_z_recon.disabled = False - if fname != "": - if fname.__contains__(".tif"): - load_tif_shape_tag() - elif recon_tomo_metadata["fname"].__contains__(".npy"): - load_npy_shape() - else: - load_tif_shape_tag(folder_import=True) - elif change.new == 0: - recon_tomo_metadata["partial"] = False - set_projection_ranges(sizeY, sizeX) - projection_range_x_recon.disabled = True - projection_range_y_recon.disabled = True - # projection_range_z_recon.disabled = True - - recon_tomo_metadata["partial"] = False - radio_recon.observe(activate_box, names="index") - radio_recon_fulldataset.observe(activate_full_partial, names="index") - - #### callbacks for projection range sliders - - @debounce(0.2) - def projection_range_x_update_dict(change): - recon_tomo_metadata["prj_range_x"] = change.new - - projection_range_x_recon.observe(projection_range_x_update_dict, "value") - - @debounce(0.2) - def projection_range_y_update_dict(change): - recon_tomo_metadata["prj_range_y"] = change.new - - projection_range_y_recon.observe(projection_range_y_update_dict, "value") - - #### downsampling - recon_tomo_metadata["opts"]["downsample"] = False - recon_tomo_metadata["opts"]["downsample_factor"] = 1 - - def downsample_turn_on(change): - if change.new == 1: - recon_tomo_metadata["opts"]["downsample"] = True - recon_tomo_metadata["opts"][ - "downsample_factor" - ] = downsample_factor_text.value - downsample_factor_text.disabled = False - if change.new == 0: - recon_tomo_metadata["opts"]["downsample"] = False - recon_tomo_metadata["opts"]["downsample_factor"] = 1 - downsample_factor_text.value = 1 - downsample_factor_text.disabled = True - - downsample_checkbox = Checkbox(description="Downsample?", value=0) - downsample_checkbox.observe(downsample_turn_on) 
- - def downsample_factor_update_dict(change): - recon_tomo_metadata["opts"]["downsample_factor"] = change.new - - downsample_factor_text = BoundedFloatText( - value=1, - min=0.001, - max=1.0, - description="Downsampling factor:", - disabled=True, - style=extend_description_style, - ) - - downsample_factor_text.observe(downsample_factor_update_dict, names="value") - - #### radio descriptions - - radio_description = "Would you like to reconstruct this dataset?" - partial_radio_description = ( - "Would you like to use the full dataset, or a partial dataset?" - ) - radio_description = HTML( - value="

" - + radio_description - + "

" - ) - partial_radio_description = HTML( - value="

" - + partial_radio_description - + "

" - ) - - #### Saving options - - def create_option_dictionary(opt_list): - opt_dictionary = {opt.description: opt.value for opt in opt_list} - return opt_dictionary - - def create_save_dict_on_checkmark(change, opt_list): - recon_tomo_metadata["save_opts"] = create_option_dictionary(opt_list) - - save_opts = ["tomo_before", "recon", "tiff", "npy"] - recon_tomo_metadata["save_opts"] = {key: None for key in save_opts} - - def create_save_checkboxes(opts): - checkboxes = [ - Checkbox(description=opt, style=extend_description_style,) for opt in opts - ] - return checkboxes - - save_checkboxes = create_save_checkboxes(save_opts) - - list( - ( - opt.observe( - functools.partial( - create_save_dict_on_checkmark, opt_list=save_checkboxes, - ), - names=["value"], - ) - for opt in save_checkboxes - ) - ) - - save_hbox = HBox( - save_checkboxes, - layout=Layout(flex_wrap="wrap", justify_content="space-between"), - ) - - save_options_accordion = Accordion( - children=[save_hbox], - selected_index=None, - layout=Layout(width="100%"), - titles=("Save Options",), - ) - - #### Methods checkboxes - - recon_FP_CUDA = Checkbox(description="FP_CUDA") - recon_BP_CUDA = Checkbox(description="BP_CUDA") - recon_FBP_CUDA = Checkbox(description="FBP_CUDA") - ### !!!!!!!! 
sirt cuda has options - maybe make them into a radio chooser - recon_SIRT_CUDA = Checkbox(description="SIRT_CUDA") - recon_SIRT_CUDA_option1 = Checkbox(description="SIRT Plugin-Faster", disabled=False) - recon_SIRT_CUDA_option2 = Checkbox(description="SIRT 3D-Fastest", disabled=False) - recon_SIRT_CUDA_option_list = [ - recon_SIRT_CUDA_option1, - recon_SIRT_CUDA_option2, - ] - recon_SIRT_CUDA_checkboxes = [ - recon_SIRT_CUDA, - recon_SIRT_CUDA_option1, - recon_SIRT_CUDA_option2, - ] - recon_SART_CUDA = Checkbox(description="SART_CUDA") - recon_CGLS_CUDA = Checkbox(description="CGLS_CUDA") - recon_MLEM_CUDA = Checkbox(description="MLEM_CUDA") - recon_method_list = [ - recon_FP_CUDA, - recon_BP_CUDA, - recon_FBP_CUDA, - recon_SART_CUDA, - recon_CGLS_CUDA, - recon_MLEM_CUDA, - ] - - ####### Toggling on options if you select SIRT. Copy the observe function below - ######## if more options are needed. - - def toggle_on(change, opt_list, dictname): - if change.new == 1: - recon_tomo_metadata["methods"][dictname] = {} - for option in opt_list: - option.disabled = False - if change.new == 0: - recon_tomo_metadata["methods"].pop(dictname) - for option in opt_list: - option.value = 0 - option.disabled = True - - recon_SIRT_CUDA.observe( - functools.partial( - toggle_on, opt_list=recon_SIRT_CUDA_option_list, dictname="SIRT_CUDA" - ), - names=["value"], - ) - - def create_option_dictionary(opt_list): - opt_dictionary = {opt.description: opt.value for opt in opt_list} - return opt_dictionary - - def create_dict_on_checkmark(change, opt_list, dictname): - recon_tomo_metadata["methods"][dictname] = create_option_dictionary(opt_list) - - def create_dict_on_checkmark_no_options(change): - if change.new == True: - recon_tomo_metadata["methods"][change.owner.description] = {} - if change.new == False: - recon_tomo_metadata["methods"].pop(change.owner.description) - - [ - checkbox.observe(create_dict_on_checkmark_no_options) - for checkbox in recon_method_list - ] - # Makes generator 
for mapping of options to observe functions. - # If other options needed for other reconstruction methods, use similar - - list( - ( - opt.observe( - functools.partial( - create_dict_on_checkmark, - opt_list=recon_SIRT_CUDA_option_list, - dictname="SIRT_CUDA", - ), - names=["value"], - ) - for opt in recon_SIRT_CUDA_option_list - ) - ) - - list( - ( - opt.observe( - functools.partial( - create_dict_on_checkmark, - opt_list=recon_SIRT_CUDA_option_list, - dictname="SIRT_CUDA", - ), - names=["value"], - ) - for opt in recon_SIRT_CUDA_option_list - ) - ) - - sirt_hbox = HBox(recon_SIRT_CUDA_checkboxes) - - recon_method_box = VBox( - [ - VBox(recon_method_list, layout=widgets.Layout(flex_flow="row wrap")), - sirt_hbox, - ] - ) - - methods_accordion = Accordion( - children=[recon_method_box], selected_index=None, titles=("Methods",) - ) - - #### options - - # number of iterations - recon_tomo_metadata["opts"]["num_iter"] = 20 - - def update_num_iter_dict(change): - recon_tomo_metadata["opts"]["num_iter"] = change.new - - number_of_recon_iterations = IntText( - description="Number of Iterations: ", style=extend_description_style, value=20, - ) - number_of_recon_iterations.observe(update_num_iter_dict, names="value") - - # center of rotation - recon_tomo_metadata["opts"]["center"] = 0 - - def update_center_of_ration_dict(change): - recon_tomo_metadata["opts"]["center"] = change.new - - center_of_rotation = IntText( - description="Center of Rotation: ", - style=extend_description_style, - value=recon_tomo_metadata["opts"]["center"], - ) - center_of_rotation.observe(update_center_of_ration_dict, names="value") - - recon_tomo_metadata["opts"]["extra_options"] = None - - def update_extra_options_dict(change): - recon_tomo_metadata["opts"]["extra_options"] = change.new - - extra_options = Text( - description="Extra options: ", - placeholder='{"MinConstraint": 0}', - style=extend_description_style, - ) - extra_options.observe(update_extra_options_dict, names="value") - - 
downsample_hb = HBox( - [downsample_checkbox, downsample_factor_text], - layout=Layout(flex_wrap="wrap", justify_content="space-between"), - ) - - options_accordion = Accordion( - children=[ - HBox( - [ - number_of_recon_iterations, - center_of_rotation, - downsample_checkbox, - downsample_factor_text, - extra_options, - ], - layout=Layout(flex_flow="row wrap", justify_content="space-between"), - ), - ], - selected_index=None, - layout=Layout(width="100%"), - titles=("Options",), - ) - - #### putting it all together - sliders_box = VBox( - [projection_range_x_recon, projection_range_y_recon,], - layout=Layout(width="30%"), - justify_content="center", - align_items="space-between", - ) - - recon_initialization_box = HBox( - [ - radio_description, - radio_recon, - partial_radio_description, - radio_recon_fulldataset, - sliders_box, - ], - layout=Layout( - width="auto", - justify_content="center", - align_items="center", - flex="flex-grow", - ), - ) - - recon_tab = VBox( - [ - recon_initialization_box, - options_accordion, - methods_accordion, - save_options_accordion, - ] - ) - - return recon_tab diff --git a/tomopyui/widgets/archive/search_box_example.py b/tomopyui/widgets/archive/search_box_example.py deleted file mode 100644 index 5167e74..0000000 --- a/tomopyui/widgets/archive/search_box_example.py +++ /dev/null @@ -1,39 +0,0 @@ -import ipywidgets as widgets - -# Generate a dummy list -Allfileslist = ["{}".format(x) for x in range(600)] -# Search box + generate some checboxes -search_widget = widgets.Text( - placeholder="Type for older experiments", description="Search:", value="" -) -experiments = {} -options_widget = widgets.VBox(layout={"overflow": "auto"}) -default_options = [ - widgets.Checkbox(description=eachfilename, value=False) - for eachfilename in Allfileslist[-10:] -] - - -def whentextischanged(change): - """Dynamically update the widget experiments""" - search_input = change["new"] - if search_input == "": - # Reset search field, default to last 9 
experiments - new_options = default_options - else: - # Filter by search - close_matches = [x for x in Allfileslist if search_input.lower() in x.lower()][ - :10 - ] - for name in close_matches: - if name not in experiments: - experiments[name] = widgets.Checkbox(description=name, value=False) - new_options = [experiments[eachfilename] for eachfilename in close_matches] - - options_widget.children = new_options - - -# Generate the vbox, search -multi_select = widgets.VBox([search_widget, options_widget]) -search_widget.observe(whentextischanged, names="value") -multi_select \ No newline at end of file diff --git a/tomopyui/widgets/archive/tomoalign_old.py b/tomopyui/widgets/archive/tomoalign_old.py deleted file mode 100644 index 7f42569..0000000 --- a/tomopyui/widgets/archive/tomoalign_old.py +++ /dev/null @@ -1,884 +0,0 @@ -from tqdm.notebook import tnrange, tqdm -from joblib import Parallel, delayed -from time import process_time, perf_counter, sleep -from skimage.registration import phase_cross_correlation - -# from skimage import transform -# removed because slower than ndi -from scipy import ndimage as ndi -from cupyx.scipy import ndimage as ndi_cp -from tomopy.recon import wrappers -from tomopy.prep.alignment import scale as scale_tomo -from contextlib import nullcontext -from tomopy.recon import algorithm -from skimage.transform import rescale -from tomopy.misc.corr import circ_mask -from copy import deepcopy, copy -from tomopy.recon.rotation import find_center, find_center_vo - -import tomopy.data.tomodata as td -import matplotlib.pyplot as plt -import datetime -import time -import json -import astra -import os -import tifffile as tf -import cupy as cp -import tomopy -import numpy as np - - -class TomoAlign: - """ - Class for performing alignments. - - Parameters - ---------- - tomo : TomoData object. - Normalize the raw tomography data with the TomoData class. Then, - initialize this class with a TomoData object. 
- metadata : metadata from setup in widget-based notebook. - """ - - # def __init__( - # self, - # tomo, - # metadata, - # alignment_wd=None, - # alignment_wd_child=None, - # prj_aligned=None, - # shift=None, - # sx=None, - # sy=None, - # recon=None, - # callbacks=None - # ): - - def __init__( - self, - Align, - tomo=None, - alignment_wd=None, - alignment_wd_child=None, - prj_aligned=None, - shift=None, - sx=None, - sy=None, - recon=None, - callbacks=None, - ): - if Align.tomo is None: - self.tomo = td.TomoData(Align.metadata) - self.metadata = metadata - self.prj_range_x = Align.prj_range_x - self.prj_range_y = Align.prj_range_y - self.shift = shift - self.sx = sx - self.sy = sy - self.conv = None - self.recon = recon - self.alignment_wd = Align.wd - self.alignment_wd_child = alignment_wd_child - - # setting up output callback context managers - if callbacks is not None: - if "methodoutput" in callbacks: - self.method_bar_cm = Align.method_output - else: - self.method_bar_cm = nullcontext() - if "output1" in callbacks: - self.output1_cm = Align.output1 - else: - self.output1_cm = nullcontext() - if "output2" in callbacks: - self.output2_cm = Align.output2 - else: - self.output2_cm = nullcontext() - else: - self.method_bar_cm = nullcontext() - self.output1_cm = nullcontext() - self.output2_cm = nullcontext() - - # creates working directory based on time - # creates multiple alignments based on - if self.metadata["alignmultiple"] == True: - self.make_wd_and_go() - self.align_multiple() - else: - if self.alignment_wd is None: - self.make_wd_and_go() - self.align() - - def make_wd_and_go(self): - now = datetime.datetime.now() - os.chdir(self.metadata["generalmetadata"]["workingdirectorypath"]) - dt_string = now.strftime("%Y%m%d-%H%M-") - os.mkdir(dt_string + "alignment") - os.chdir(dt_string + "alignment") - self.save_align_metadata() - if self.metadata["save_opts"]["tomo_before"]: - np.save("projections_before_alignment", self.tomo.prj_imgs) - self.alignment_wd = 
os.getcwd() - - def align_multiple(self): - - metadata_list = [] - for key in self.metadata["methods"]: - d = self.metadata["methods"] - keys_to_remove = set(self.metadata["methods"].keys()) - keys_to_remove.remove(key) - _d = {k: d[k] for k in set(list(d.keys())) - keys_to_remove} - _metadata = self.metadata.copy() - _metadata["methods"] = _d - _metadata["alignmultiple"] = False - metadata_list.append(_metadata) - - for metadata in metadata_list: - self.callbacks["button"].description = ( - "Starting" + " " + list(metadata["methods"].keys())[0] - ) - self.__init__(self.tomo, metadata, alignment_wd=self.alignment_wd) - - def align(self): - """ - Aligns a TomoData object using options in GUI. - """ - proj_range_x_low = self.metadata["opts"]["prj_range_x"][0] - proj_range_x_high = self.metadata["opts"]["prj_range_x"][1] - proj_range_y_low = self.metadata["opts"]["prj_range_y"][0] - proj_range_y_high = Align.metadata["opts"]["prj_range_y"][1] - self.prj_aligned = self.tomo.prj_imgs[ - :, - proj_range_y_low:proj_range_y_high:1, - proj_range_x_low:proj_range_x_high:1, - ].copy() - - tic = time.perf_counter() - self.joint_astra_cupy() - toc = time.perf_counter() - - Align.metadata["alignment_time"] = { - "seconds": toc - tic, - "minutes": (toc - tic) / 60, - "hours": (toc - tic) / 3600, - } - - self.save_align_data() - - def save_align_metadata(self): - # from https://stackoverflow.com/questions/51674222/how-to-make-json-dumps-in-python-ignore-a-non-serializable-field - def safe_serialize(obj, f): - default = lambda o: f"<>" - return json.dump(obj, f, default=default, indent=4) - - with open("overall_alignment_metadata.json", "w+") as f: - a = safe_serialize(self.metadata, f) - - def save_align_data(self): - - # if on the second alignment, go into the directory most recently saved - if self.metadata["align_number"] > 0: - os.chdir(self.alignment_wd_child) - now = datetime.datetime.now() - dt_string = now.strftime("%Y%m%d-%H%M-") - method_str = 
list(self.metadata["methods"].keys())[0] - - if ( - "SIRT_CUDA" in self.metadata["methods"] - and "Faster" in self.metadata["methods"]["SIRT_CUDA"] - ): - if self.metadata["methods"]["SIRT_CUDA"]["Faster"]: - method_str = method_str + "-faster" - if self.metadata["methods"]["SIRT_CUDA"]["Fastest"]: - method_str = method_str + "-fastest" - os.mkdir(dt_string + method_str) - os.chdir(dt_string + method_str) - - # save child working directory for use in multiple alignments - self.alignment_wd_child = os.getcwd() - - # https://stackoverflow.com/questions/51674222/how-to-make-json-dumps-in-python-ignore-a-non-serializable-field - def safe_serialize(obj, f): - default = lambda o: f"<>" - return json.dump(obj, f, default=default, indent=4) - - with open("metadata.json", "w+") as f: - a = safe_serialize(self.metadata, f) - - if self.metadata["save_opts"]["tomo_after"]: - if self.metadata["save_opts"]["npy"]: - np.save("projections_after_alignment", self.tomo.prj_imgs) - if self.metadata["save_opts"]["tiff"]: - tf.imwrite("projections_after_alignment.tif", self.tomo.prj_imgs) - if ( - not self.metadata["save_opts"]["tiff"] - and not self.metadata["save_opts"]["npy"] - ): - tf.imwrite("projections_after_alignment.tif", self.tomo.prj_imgs) - if self.metadata["save_opts"]["recon"]: - if self.metadata["save_opts"]["npy"]: - np.save("last_recon", self.recon) - if self.metadata["save_opts"]["tiff"]: - tf.imwrite("last_recon.tif", self.recon) - if ( - not self.metadata["save_opts"]["tiff"] - and not self.metadata["save_opts"]["npy"] - ): - tf.imwrite("last_recon.tif", self.recon) - - np.save("sx", self.sx) - np.save("sy", self.sy) - np.save("conv", self.conv) - - if self.metadata["align_number"] == 0: - os.chdir(self.alignment_wd) - else: - os.chdir(self.alignment_wd_child) - - def joint_astra_cupy(self): - # Initialize variables from metadata for ease of reading: - # ensure it only runs on 1 thread for CUDA - os.environ["TOMOPY_PYTHON_THREADS"] = "1" - num_iter = 
self.metadata["opts"]["num_iter"] - init_tomo_shape = self.prj_aligned.shape - downsample = self.metadata["opts"]["downsample"] - pad = self.metadata["opts"]["pad"] - method_str = list(self.metadata["methods"].keys())[0] - upsample_factor = self.metadata["opts"]["upsample_factor"] - num_batches = self.metadata["opts"]["batch_size"] # change to num_batches - - # Needs scaling for skimage float operations. - self.prj_aligned, scl = scale_tomo(self.prj_aligned) - - # pad sample after downsampling. this avoid uncessary allocation of - # memory to an already-large array if downsampled. - - if downsample: - downsample_factor = self.metadata["opts"]["downsample_factor"] - - # downsample images in stack - self.prj_aligned = rescale( - self.prj_aligned, - (1, downsample_factor, downsample_factor), - anti_aliasing=True, - ) - - #!!!!!!!!!!!! TODO: add option for finding center or specifying - center = find_center_vo(self.prj_aligned) - print("found center with vo") - print(center) - # add downsampled padding to the edges of the sample - pad_ds = tuple([int(downsample_factor * x) for x in pad]) - center = center + pad_ds[0] - self.prj_aligned, pad_ds = pad_projections(self.prj_aligned, pad_ds, 1) - else: - downsample_factor = 1 - pad_ds = pad - center = 197 - center = center + pad_ds[0] - self.prj_aligned, pad_ds = pad_projections(self.prj_aligned, pad_ds, 1) - - # Initialization of reconstruction dataset - tomo_shape = self.prj_aligned.shape - self.recon = np.empty( - (tomo_shape[1], tomo_shape[2], tomo_shape[2]), dtype=np.float32 - ) - - # add progress bar for method. roughly a full-loop progress bar. 
- # with self.method_bar_cm: - # method_bar = tqdm( - # total=num_iter, - # desc=options["method"], - # display=True, - # ) - - # Initialize shift arrays - self.sx = np.zeros((init_tomo_shape[0])) - self.sy = np.zeros((init_tomo_shape[0])) - self.conv = np.zeros((num_iter)) - - # start iterative alignment - for n in range(num_iter): - - _rec = self.recon - - if self.metadata["methods"]["SIRT_CUDA"]["Faster"] == True: - self.recon = self.recon_sirt_3D(self.prj_aligned, center=center) - elif self.metadata["methods"]["SIRT_CUDA"]["Fastest"] == True: - self.recon = self.recon_sirt_3D_allgpu( - self.prj_aligned, _rec, center=center - ) - else: - - # Options go into kwargs which go into recon() - kwargs = {} - options = {"proj_type": "cuda", "method": method_str, "num_iter": 1} - kwargs["options"] = options - - self.recon = algorithm.recon( - self.prj_aligned, - self.tomo.theta, - algorithm=wrappers.astra, - init_recon=_rec, - center=center, - ncore=None, - **kwargs, - ) - # update progress bar - # method_bar.update() - - # break up reconstruction into batches along z axis - self.recon = np.array_split(self.recon, num_batches, axis=0) - # may not need a copy. - _rec = self.recon.copy() - - # initialize simulated projection cpu array - sim = [] - - # begin simulating projections using astra. - # this could probably be made more efficient, right now I am not - # certain if I need to be deleting every time. 
- - with self.output1_cm: - self.output1_cm.clear_output() - simulate_projections(_rec, sim, center, self.tomo.theta) - # del _rec - sim = np.concatenate(sim, axis=1) - - # only flip the simulated datasets if using normal tomopy algorithm - # can remove if it is flipped in the algorithm - if ( - self.metadata["methods"]["SIRT_CUDA"]["Faster"] == False - and self.metadata["methods"]["SIRT_CUDA"]["Fastest"] == False - ): - sim = np.flip(sim, axis=0) - - # Cross correlation - shift_cpu = [] - batch_cross_correlation( - self.prj_aligned, - sim, - shift_cpu, - num_batches, - upsample_factor, - subset_correlation=False, - blur=False, - pad=pad_ds, - ) - self.shift = np.concatenate(shift_cpu, axis=1) - - # Shifting - ( - self.prj_aligned, - self.sx, - self.sy, - self.shift, - err, - pad_ds, - center, - ) = warp_prj_shift_cp( - self.prj_aligned, - self.sx, - self.sy, - self.shift, - num_batches, - pad_ds, - center, - downsample_factor=downsample_factor, - ) - self.conv[n] = np.linalg.norm(err) - with self.output2_cm: - self.output2_cm.clear_output(wait=True) - self.plotIm(sim) - self.plotSxSy(downsample_factor) - print(f"Error = {np.linalg.norm(err):3.3f}.") - - self.recon = np.concatenate(self.recon, axis=0) - mempool = cp.get_default_memory_pool() - mempool.free_all_blocks() - - # self.recon = np.concatenate(self.recon, axis=0) - # Re-normalize data - # method_bar.close() - self.prj_aligned *= scl - self.recon = circ_mask(self.recon, 0) - if downsample: - self.sx = self.sx / downsample_factor - self.sy = self.sy / downsample_factor - self.shift = self.shift / downsample_factor - - pad = tuple([x / downsample_factor for x in pad_ds]) - # make new dataset and pad/shift it for the next round - new_prj_imgs = deepcopy(self.tomo.prj_imgs) - new_prj_imgs, pad = pad_projections(new_prj_imgs, pad, 1) - new_prj_imgs = warp_prj_cp( - new_prj_imgs, self.sx, self.sy, num_batches, pad, use_corr_prj_gpu=False - ) - new_prj_imgs = trim_padding(new_prj_imgs) - self.tomo = td.TomoData( - 
prj_imgs=new_prj_imgs, metadata=self.metadata["importmetadata"]["tomo"] - ) - return self - - def plotIm(self, sim, projection_num=50): - fig = plt.figure(figsize=(8, 8)) - ax1 = plt.subplot(1, 2, 1) - ax2 = plt.subplot(1, 2, 2) - ax1.imshow(self.prj_aligned[projection_num], cmap="gray") - ax1.set_axis_off() - ax1.set_title("Projection Image") - ax2.imshow(sim[projection_num], cmap="gray") - ax2.set_axis_off() - ax2.set_title("Re-projected Image") - plt.show() - - def plotSxSy(self, downsample_factor): - plotrange = range(self.prj_aligned.shape[0]) - fig = plt.figure(figsize=(8, 8)) - ax1 = plt.subplot(2, 1, 1) - ax2 = plt.subplot(2, 1, 2) - ax1.set(xlabel="Projection number", ylabel="Pixel shift (not downsampled)") - ax1.plot(plotrange, self.sx / downsample_factor) - ax1.set_title("Sx") - ax2.plot(plotrange, self.sy / downsample_factor) - ax2.set_title("Sy") - ax2.set(xlabel="Projection number", ylabel="Pixel shift (not downsampled)") - plt.show() - - def recon_sirt_3D(self, prj, center): - # Init tomo in sinogram order - sinograms = algorithm.init_tomo(prj, 0) - num_proj = sinograms.shape[1] - num_y = sinograms.shape[0] - num_x = sinograms.shape[2] - # assume angles used are the same as parent tomography - angles = self.tomo.theta - proj_geom = astra.create_proj_geom("parallel3d", 1, 1, num_y, num_x, angles) - if center is not None: - center_shift = -(center - num_x / 2) - proj_geom = astra.geom_postalignment(proj_geom, (center_shift,)) - vol_geom = astra.create_vol_geom(num_x, num_x, num_y) - projector = astra.create_projector("cuda3d", proj_geom, vol_geom) - astra.plugin.register(astra.plugins.SIRTPlugin) - W = astra.OpTomo(projector) - rec_sirt = W.reconstruct( - "SIRT-PLUGIN", sinograms, self.metadata["opts"]["num_iter"] - ) - return rec_sirt - - def recon_sirt_3D_allgpu(self, prj, rec, center=None): - # Init tomo in sinogram order - sinograms = algorithm.init_tomo(prj, 0) - num_proj = sinograms.shape[1] - num_y = sinograms.shape[0] - num_x = 
sinograms.shape[2] - # assume angles used are the same as parent tomography - angles = self.tomo.theta - # create projection geometry with shape of - proj_geom = astra.create_proj_geom("parallel3d", 1, 1, num_y, num_x, angles) - # shifts the projection geometry so that it will reconstruct using the - # correct center. - if center is not None: - center_shift = -(center - num_x / 2) - proj_geom = astra.geom_postalignment(proj_geom, (center_shift,)) - vol_geom = astra.create_vol_geom(num_x, num_x, num_y) - sinograms_id = astra.data3d.create("-sino", proj_geom, sinograms) - rec_id = astra.data3d.create("-vol", vol_geom, rec) - reco_alg = "SIRT3D_CUDA" - cfg = astra.astra_dict(reco_alg) - cfg["ProjectionDataId"] = sinograms_id - cfg["ReconstructionDataId"] = rec_id - alg_id = astra.algorithm.create(cfg) - astra.algorithm.run(alg_id, 2) - rec_sirt = astra.data3d.get(rec_id) - astra.algorithm.delete(alg_id) - astra.data3d.delete(rec_id) - astra.data3d.delete(sinograms_id) - return rec_sirt - - -def transform_parallel(prj, sx, sy, shift, metadata): - num_theta = prj.shape[0] - err = np.zeros((num_theta + 1, 1)) - shift_y_condition = ( - metadata["opts"]["pad"][1] * metadata["opts"]["downsample_factor"] - ) - shift_x_condition = ( - metadata["opts"]["pad"][0] * metadata["opts"]["downsample_factor"] - ) - - def transform_algorithm(prj, shift, sx, sy, m): - shiftm = shift[:, m] - # don't let it shift if the value is larger than padding - if ( - np.absolute(sx[m] + shiftm[1]) < shift_x_condition - and np.absolute(sy[m] + shiftm[0]) < shift_y_condition - ): - sx[m] += shiftm[1] - sy[m] += shiftm[0] - err[m] = np.sqrt(shiftm[0] * shiftm[0] + shiftm[1] * shiftm[1]) - - # similarity transform shifts in (x, y) - # tform = transform.SimilarityTransform(translation=(shiftm[1], shiftm[0])) - # prj[m] = transform.warp(prj[m], tform, order=5) - - # found that ndi is much faster than the above warp - # uses opposite convention - shift_tuple = (shiftm[0], shiftm[1]) - shift_tuple = 
tuple([-1 * x for x in shift_tuple]) - prj[m] = ndi.shift(prj[m], shift_tuple, order=5) - - Parallel(n_jobs=-1, require="sharedmem")( - delayed(transform_algorithm)(prj, shift, sx, sy, m) - for m in range(num_theta) - # for m in tnrange(num_theta, desc="Transformation", leave=True) - ) - return prj, sx, sy, err - - -def warp_projections(prj, sx, sy, metadata): - num_theta = prj.shape[0] - err = np.zeros((num_theta + 1, 1)) - shift_y_condition = metadata["opts"]["pad"][1] - shift_x_condition = metadata["opts"]["pad"][0] - - def transform_algorithm_warponly(prj, sx, sy, m): - # don't let it shift if the value is larger than padding - if ( - np.absolute(sx[m]) < shift_x_condition - and np.absolute(sy[m]) < shift_y_condition - ): - # similarity transform shifts in (x, y) - # see above note for ndi switch - # tform = transform.SimilarityTransform(translation=(sx[m], sy[m])) - # prj[m] = transform.warp(prj[m], tform, order=5) - - shift_tuple = (sy[m], sx[m]) - shift_tuple = tuple([-1 * x for x in shift_tuple]) - prj[m] = ndi.shift(prj[m], shift_tuple, order=5) - - Parallel(n_jobs=-1, require="sharedmem")( - delayed(transform_algorithm_warponly)(prj, sx, sy, m) - for m in range(num_theta) - # for m in tnrange(num_theta, desc="Transformation", leave=True) - ) - return prj - - -def init_new_from_prior(prior_tomoalign, metadata): - prj_imgs = deepcopy(prior_tomoalign.tomo.prj_imgs) - new_tomo = td.TomoData( - prj_imgs=prj_imgs, metadata=metadata["importmetadata"]["tomo"] - ) - new_align_object = TomoAlign( - new_tomo, - metadata, - alignment_wd=prior_tomoalign.alignment_wd, - alignment_wd_child=prior_tomoalign.alignment_wd_child, - ) - return new_align_object - - -def trim_padding(prj): - # https://stackoverflow.com/questions/54567986/python-numpy-remove-empty-zeroes-border-of-3d-array - xs, ys, zs = np.where(prj > 1e-7) - - minxs = np.min(xs) - maxxs = np.max(xs) - minys = np.min(ys) - maxys = np.max(ys) - minzs = np.min(zs) - maxzs = np.max(zs) - - # extract cube with 
extreme limits of where are the values != 0 - result = prj[minxs : maxxs + 1, minys : maxys + 1, minzs : maxzs + 1] - # not sure why +1 here. - - return result - - -def simulate_projections(rec, sim, center, theta): - for batch in range(len(rec)): - # for batch in tnrange(len(rec), desc="Re-projection", leave=True): - _rec = rec[batch] - vol_geom = astra.create_vol_geom(_rec.shape[1], _rec.shape[1], _rec.shape[0]) - phantom_id = astra.data3d.create("-vol", vol_geom, data=_rec) - proj_geom = astra.create_proj_geom( - "parallel3d", 1, 1, _rec.shape[0], _rec.shape[1], theta, - ) - if center is not None: - center_shift = -(center - _rec.shape[1] / 2) - proj_geom = astra.geom_postalignment(proj_geom, (center_shift,)) - projections_id, _sim = astra.creators.create_sino3d_gpu( - phantom_id, proj_geom, vol_geom - ) - _sim = _sim.swapaxes(0, 1) - sim.append(_sim) - astra.data3d.delete(projections_id) - astra.data3d.delete(phantom_id) - - -def batch_cross_correlation( - prj, - sim, - shift_cpu, - num_batches, - upsample_factor, - blur=True, - rin=0.5, - rout=0.8, - subset_correlation=False, - mask_sim=True, - pad=(0, 0), -): - # TODO: the sign convention for shifting is bad here. - # To fix this, change to - # shift_gpu = phase_cross_correlation(_sim_gpu, _prj_gpu...) - # convention right now: - # if _sim is down and to the right, the shift tuple will be (-, -) - # before going positive. - # split into arrays for batch. - _prj = np.array_split(prj, num_batches, axis=0) - _sim = np.array_split(sim, num_batches, axis=0) - for batch in range(len(_prj)): - # for batch in tnrange(len(_prj), desc="Cross-correlation", leave=True): - # projection images have been shifted. mask also shifts. - # apply the "moving" mask to the simulated projections - # simulated projections have data outside of the mask. 
- if subset_correlation: - _prj_gpu = cp.array( - _prj[batch][ - :, 2 * pad[1] : -2 * pad[1] : 1, 2 * pad[0] : -2 * pad[0] : 1 - ], - dtype=cp.float32, - ) - _sim_gpu = cp.array( - _sim[batch][ - :, 2 * pad[1] : -2 * pad[1] : 1, 2 * pad[0] : -2 * pad[0] : 1 - ], - dtype=cp.float32, - ) - else: - _prj_gpu = cp.array(_prj[batch], dtype=cp.float32) - _sim_gpu = cp.array(_sim[batch], dtype=cp.float32) - - if mask_sim: - _sim_gpu = cp.where(_prj_gpu < 1e-7, 0, _sim_gpu) - - if blur: - _prj_gpu = blur_edges_cp(_prj_gpu, rin, rout) - _sim_gpu = blur_edges_cp(_sim_gpu, rin, rout) - - # e.g. lets say sim is (-50, 0) wrt prj. This would correspond to - # a shift of [+50, 0] - # In the warping section, we have to now warp prj by (-50, 0), so the - # SAME sign of the shift value given here. - shift_gpu = phase_cross_correlation( - _sim_gpu, _prj_gpu, upsample_factor=upsample_factor, return_error=False, - ) - shift_cpu.append(cp.asnumpy(shift_gpu)) - # shift_cpu = np.concatenate(shift_cpu, axis=1) - - -def blur_edges_cp(prj, low=0, high=0.8): - """ - Blurs the edge of the projection images using cupy. - - Parameters - ---------- - prj : ndarray - 3D stack of projection images. The first dimension - is projection axis, second and third dimensions are - the x- and y-axes of the projection image, respectively. - low : scalar, optional - Min ratio of the blurring frame to the image size. - high : scalar, optional - Max ratio of the blurring frame to the image size. - - Returns - ------- - ndarray - Edge-blurred 3D stack of projection images. 
- """ - if type(prj) is np.ndarray: - prj_gpu = cp.array(prj, dtype=cp.float32) - else: - prj_gpu = prj - dx, dy, dz = prj_gpu.shape - rows, cols = cp.mgrid[:dy, :dz] - rad = cp.sqrt((rows - dy / 2) ** 2 + (cols - dz / 2) ** 2) - mask = cp.zeros((dy, dz)) - rmin, rmax = low * rad.max(), high * rad.max() - mask[rad < rmin] = 1 - mask[rad > rmax] = 0 - zone = cp.logical_and(rad >= rmin, rad <= rmax) - mask[zone] = (rmax - rad[zone]) / (rmax - rmin) - prj_gpu *= mask - return prj_gpu - - -def warp_prj_shift_cp( - prj, - sx, - sy, - shift, - num_batches, - pad, - center, - downsample_factor=1, - smart_shift=True, - smart_pad=True, -): - # Why is the error calculated in such a strange way? - # Will use the standard used in tomopy here, but think of different way to - # calculate error. - # TODO: add checks for sx, sy having the same dimension as prj - # - # If the shift starts to get larger than the padding in one direction, - # shift it to the center of the sx values. This should help to avoid - average_sx = None - average_sy = None - if smart_shift: - cond1 = sx.max() > 0.95 * pad[0] - cond2 = sy.max() > 0.95 * pad[1] - cond3 = np.absolute(sx.min()) > 0.95 * pad[0] - cond4 = np.absolute(sy.min()) > 0.95 * pad[1] - if cond1 or cond2 or cond3 or cond4: - print("applying smart shift") - print(f"sx max: {sx.max()}") - print(f"sx min: {sx.min()}") - average_sx = (sx.max() + sx.min()) / 2 - average_sy = (sy.max() + sy.min()) / 2 - sx_smart_shift = average_sx * np.ones_like(sx) - sy_smart_shift = average_sy * np.ones_like(sy) - sx -= sx_smart_shift - sy -= sy_smart_shift - print(f"sx max after shift: {sx.max()}") - print(f"sx min after shift: {sx.min()}") - center = center + average_sx - if smart_pad: - if average_sx < 1 and cond1 and cond3: - extra_pad = tuple([0.2 * pad[0], 0]) - center = center + extra_pad[0] - pad = np.array(extra_pad) + np.array(pad) - prj, extra_pad = pad_projections(prj, extra_pad, 1) - if average_sy < 1 and cond2 and cond4: - extra_pad = tuple([0, 
0.2 * pad[1]]) - pad = np.array(extra_pad) + np.array(pad) - prj, extra_pad = pad_projections(prj, extra_pad, 1) - - num_theta = prj.shape[0] - # TODO: why +1?? - err = np.zeros((num_theta + 1, 1)) - shifted_bool = np.zeros((num_theta + 1, 1)) - - # split all arrays up into batches. - err = np.array_split(err, num_batches) - prj_cpu = np.array_split(prj, num_batches, axis=0) - sx = np.array_split(sx, num_batches, axis=0) - sy = np.array_split(sy, num_batches, axis=0) - shift = np.array_split(shift, num_batches, axis=1) - shifted_bool = np.array_split(shifted_bool, num_batches, axis=0) - for batch in range(len(prj_cpu)): - # for batch in tnrange(len(prj_cpu), desc="Shifting", leave=True): - _prj_gpu = cp.array(prj_cpu[batch], dtype=cp.float32) - - for image in range(_prj_gpu.shape[0]): - # err calc before if - - err[batch][image] = np.sqrt( - shift[batch][0, image] * shift[batch][0, image] - + shift[batch][1, image] * shift[batch][1, image] - ) - if ( - np.absolute(sx[batch][image] + shift[batch][1, image]) < pad[0] - and np.absolute(sy[batch][image] + shift[batch][0, image]) < pad[1] - ): - shifted_bool[batch][image] = 1 - sx[batch][image] += shift[batch][1, image] - sy[batch][image] += shift[batch][0, image] - shift_tuple = (shift[batch][0, image], shift[batch][1, image]) - _prj_gpu[image] = ndi_cp.shift(_prj_gpu[image], shift_tuple, order=5) - - prj_cpu[batch] = cp.asnumpy(_prj_gpu) - - # concatenate the final list and return - prj_cpu = np.concatenate(prj_cpu, axis=0) - err = np.concatenate(err) - shifted_bool = np.concatenate(shifted_bool) - sx = np.concatenate(sx, axis=0) - sy = np.concatenate(sy, axis=0) - shift = np.concatenate(shift, axis=1) - return prj_cpu, sx, sy, shift, err, pad, center - - -def warp_prj_cp(prj, sx, sy, num_batches, pad, use_corr_prj_gpu=False): - # add checks for sx, sy having the same dimension as prj - prj_cpu = np.array_split(prj, num_batches, axis=0) - _sx = np.array_split(sx, num_batches, axis=0) - _sy = np.array_split(sy, 
num_batches, axis=0) - for batch in range(len(prj_cpu)): - # for batch in tnrange(len(prj_cpu), desc="Shifting", leave=True): - _prj_gpu = cp.array(prj_cpu[batch], dtype=cp.float32) - num_theta = _prj_gpu.shape[0] - shift_y_condition = pad[1] - shift_x_condition = pad[0] - - for image in range(_prj_gpu.shape[0]): - if ( - np.absolute(_sx[batch][image]) < shift_x_condition - and np.absolute(_sy[batch][image]) < shift_y_condition - ): - shift_tuple = (_sy[batch][image], _sx[batch][image]) - _prj_gpu[image] = ndi_cp.shift(_prj_gpu[image], shift_tuple, order=5) - - prj_cpu[batch] = cp.asnumpy(_prj_gpu) - prj_cpu = np.concatenate(prj_cpu, axis=0) - return prj_cpu - - -# warning I don't think I fixed sign convention here. -def transform_parallel(prj, sx, sy, shift, metadata): - num_theta = prj.shape[0] - err = np.zeros((num_theta + 1, 1)) - shift_y_condition = ( - metadata["opts"]["pad"][1] * metadata["opts"]["downsample_factor"] - ) - shift_x_condition = ( - metadata["opts"]["pad"][0] * metadata["opts"]["downsample_factor"] - ) - - def transform_algorithm(prj, shift, sx, sy, m): - shiftm = shift[:, m] - # don't let it shift if the value is larger than padding - if ( - np.absolute(sx[m] + shiftm[1]) < shift_x_condition - and np.absolute(sy[m] + shiftm[0]) < shift_y_condition - ): - sx[m] += shiftm[1] - sy[m] += shiftm[0] - err[m] = np.sqrt(shiftm[0] * shiftm[0] + shiftm[1] * shiftm[1]) - - # similarity transform shifts in (x, y) - # tform = transform.SimilarityTransform(translation=(shiftm[1], shiftm[0])) - # prj[m] = transform.warp(prj[m], tform, order=5) - - # found that ndi is much faster than the above warp - # uses opposite convention - shift_tuple = (shiftm[0], shiftm[1]) - shift_tuple = tuple([-1 * x for x in shift_tuple]) - prj[m] = ndi.shift(prj[m], shift_tuple, order=5) - - Parallel(n_jobs=-1, require="sharedmem")( - delayed(transform_algorithm)(prj, shift, sx, sy, m) - for m in range(num_theta) - # for m in tnrange(num_theta, desc="Transformation", leave=True) 
- ) - return prj, sx, sy, err - - -def pad_projections(prj, pad, downsample_factor): - pad_ds = tuple([int(downsample_factor * x) for x in pad]) - npad_ds = ((0, 0), (pad_ds[1], pad_ds[1]), (pad_ds[0], pad_ds[0])) - prj = np.pad(prj, npad_ds, mode="constant", constant_values=0) - return prj, pad_ds diff --git a/tomopyui/widgets/center.py b/tomopyui/widgets/center.py new file mode 100644 index 0000000..e0879b7 --- /dev/null +++ b/tomopyui/widgets/center.py @@ -0,0 +1,585 @@ +import numpy as np +import copy + +# astra_cuda_recon_algorithm_kwargs, tomopy_recon_algorithm_kwargs, +# and tomopy_filter_names, extend_description_style +from tomopyui._sharedvars import * +from ipywidgets import * +from tomopy.recon.rotation import find_center_vo, find_center, find_center_pc +from tomopyui.widgets.view import BqImViewer_Center, BqImViewer_Center_Recon +from tomopyui.backend.util.center import write_center +from tomopyui.widgets.helpers import ReactiveTextButton, ReactiveIconButton +from scipy.stats import linregress + + +class Center: + """ + Class for creating a tab to help find the center of rotation. See examples + for more information on center finding. + + Attributes + ---------- + Import : `Import` + Needs an import object to be constructed. + current_center : double + Current center of rotation. Updated when center_textbox is updated. + center_guess : double + Guess value for center of rotation for automatic alignment (`~tomopy.recon.rotation.find_center`). + index_to_try : int + Index to try out when automatically (entropy) or manually trying to + find the center of rotation. + search_step : double + Step size between centers (see `tomopy.recon.rotation.write_center` or + `tomopyui.backend.util.center`). + search_range : double + Will search from [center_guess - search_range] to [center_guess + search range] + in steps of search_step. + num_iter : int + Number of iterations to use in center reconstruction. + algorithm : str + Algorithm to use in the reconstruction. 
Chosen from dropdown list. + filter : str + Filter to be used. Only works with fbp and gridrec. If you choose + another algorith, this will be ignored. + + """ + + def __init__(self, Import): + self.Import = Import + self.projections = Import.projections + self.Import.Center = self + self.current_center = self.Import.prenorm_uploader.projections.pxX / 2 + self.center_slice_list = [] + self.use_multiple_centers = False + self.center_guess = None + self.index_to_try = None + self.search_step = 10 + self.search_range = 100 + self.recon_slice = None + self.cen_range = None + self.use_ds = True + self.num_iter = int(1) + self.algorithm = "gridrec" + self.filter = "parzen" + self.metadata = {} + self.viewer = BqImViewer_Center() + self.viewer.create_app() + self.rec_viewer = BqImViewer_Center_Recon() + self.rec_viewer.create_app() + self.reg = None + self.header_font_style = { + "font_size": "22px", + "font_weight": "bold", + "font_variant": "small-caps", + } + self.button_font = {"font_size": "22px"} + self.button_layout = Layout(width="45px", height="40px") + self._init_widgets() + self._set_observes() + self.make_tab() + + def set_metadata(self): + """ + Sets `Center` metadata. 
+ """ + self.metadata["center"] = self.current_center + self.metadata["center_guess"] = self.center_guess + self.metadata["index_to_try"] = self.index_to_try + self.metadata["search_step"] = self.search_step + self.metadata["search_range"] = self.search_range + self.metadata["cen_range"] = self.cen_range + self.metadata["num_iter"] = self.num_iter + self.metadata["algorithm"] = self.algorithm + self.metadata["filter"] = self.filter + + def _init_widgets(self): + + self.center_textbox = FloatText( + description="Center: ", + disabled=False, + style=extend_description_style, + layout=Layout(justify_content="center"), + ) + self.load_rough_center = Button( + description="Click to load rough center from imported data.", + disabled=False, + button_style="info", + tooltip="Loads the half-way pixel point for the center.", + icon="", + layout=Layout(width="auto", justify_content="center"), + ) + self.center_guess_textbox = FloatText( + description="Guess for center: ", + disabled=False, + style=extend_description_style, + ) + self.find_center_button = ReactiveTextButton( + self.find_center_on_click, + "Click to automatically find center (image entropy).", + "Automatically finding center (image entropy).", + "Found center.", + warning="Please import some data first.", + ) + self.find_center_vo_button = ReactiveTextButton( + self.find_center_vo_on_click, + "Click to automatically find center (Vo).", + "Automatically finding center.", + "Found center.", + warning="Please import some data first.", + ) + self.find_center_manual_button = ReactiveTextButton( + self.find_center_manual_on_click, + "Click to find center by plotting.", + "Reconstructing.", + "Now search for the center using the reconstruction slider. 
Add more values if your sample has multiple centers of rotation.", + warning="Your projections do not have associated theta values.", + ) + self.index_to_try_textbox = IntText( + description="Slice to use: ", + disabled=False, + style=extend_description_style, + placeholder="Default is 1/2*y pixels", + ) + self.num_iter_textbox = IntText( + description="Number of iterations: ", + disabled=False, + style=extend_description_style, + value=self.num_iter, + ) + self.search_range_textbox = IntText( + description="Search range around center:", + disabled=False, + style=extend_description_style, + value=self.search_range, + ) + self.search_step_textbox = FloatText( + description="Step size in search range: ", + disabled=False, + style=extend_description_style, + value=self.search_step, + ) + self.algorithms_dropdown = Dropdown( + options=[key for key in tomopy_recon_algorithm_kwargs], + value=self.algorithm, + description="Algorithm:", + ) + self.filters_dropdown = Dropdown( + options=[key for key in tomopy_filter_names], + value=self.filter, + description="Filter:", + ) + self.use_ds_checkbox = Checkbox( + description="Use viewer downsampling: ", + value=True, + disabled=True, + style=extend_description_style, + ) + self.projections_plot_header = "Projections" + self.projections_plot_header = Label( + self.projections_plot_header, style=self.header_font_style + ) + self.reconstructions_plot_header = "Reconstructions" + self.reconstructions_plot_header = Label( + self.reconstructions_plot_header, style=self.header_font_style + ) + + # -- Centers display ----------------------------------------------------------- + self.center_select_label = Label("Slice : Center", style=self.header_font_style) + self.center_select = Select( + disabled=True, + rows=10, + ) + self.all_centers_select_label = Label( + "Centers for reconstruction", style=self.header_font_style + ) + self.all_centers_select = Select( + disabled=True, + rows=10, + ) + self.remove_center_button = Button( + 
disabled=True, + icon="fa-minus-square", + tooltip="Remove selected center.", + layout=self.button_layout, + style=self.button_font, + ) + self.buttons_to_disable = [ + self.center_select, + self.remove_center_button, + ] + self.add_center_button = ReactiveIconButton( + callback=self.set_center_to_slice, + icon="fa-plus-square", + tooltip="Add center from reconstruction of this slice.", + skip_during=True, + layout=self.button_layout, + style=self.button_font, + disabled=False, + ) + self.add_center_button.button.disabled = True + self.rec_viewer.image_index_slider.disabled = True + + def _use_ds_data(self, change): + self.use_ds = self.use_ds_checkbox.value + + def _center_update(self, change): + self.current_center = change.new + self.center_guess = change.new + self.viewer.center_line.x = [ + change.new / self.viewer.pxX, + change.new / self.viewer.pxX, + ] + self.set_metadata() + + def _center_guess_update(self, change): + self.center_guess = change.new + self.viewer.center_line.x = [ + change.new / self.viewer.pxX, + change.new / self.viewer.pxX, + ] + self.set_metadata() + + def _load_rough_center_onclick(self, change): + self.center_guess = self.projections.pxX / 2 + self.current_center = self.center_guess + self.center_textbox.value = self.center_guess + self.center_guess_textbox.value = self.center_guess + self.index_to_try_textbox.value = int( + np.around(self.Import.projections.pxY / 2) + ) + self.index_to_try = self.index_to_try_textbox.value + self.set_metadata() + + def _index_to_try_update(self, change): + self.index_to_try = change.new + self.set_metadata() + + def _num_iter_update(self, change): + self.num_iter = change.new + self.set_metadata() + + def _search_range_update(self, change): + self.search_range = change.new + self.set_metadata() + + def _search_step_update(self, change): + self.search_step = change.new + self.set_metadata() + + def _update_algorithm(self, change): + self.algorithm = change.new + self.set_metadata() + + def 
_update_filters(self, change): + self.filter = change.new + self.set_metadata() + + def _slice_slider_update(self, change): + slider_ind = change.new + line_display = self.viewer.pxY - slider_ind + self.viewer.slice_line.y = [ + line_display / self.viewer.pxY, + line_display / self.viewer.pxY, + ] + self.index_to_try_textbox.value = int(line_display) + + def _center_textbox_slider_update(self, change): + if self.add_center_button.button.button_style == "success": + self.add_center_button.reset_state() + self.center_textbox.value = self.cen_range[change.new] + self.center_guess_textbox.value = self.cen_range[change.new] + self.current_center = self.center_textbox.value + self.viewer.update_center_line(self, change.new) + self.set_metadata() + + def set_center_to_slice(self, *args): + add_cond = [self.recon_slice == x[1] for x in self.center_slice_list] + if any(add_cond): + ind = [i for i, j in enumerate(add_cond) if j] + [self.center_slice_list.pop(i) for i in ind] + center_slice = (self.current_center, self.recon_slice) + self.center_slice_list.append(center_slice) + self.update_center_select() + + def remove_center(self, *args): + ind = self.center_select.index + if ind is None: + self.center_slice_list.pop(-1) + else: + self.center_slice_list.pop(ind) + self.update_center_select() + + def update_center_select(self): + slice_column = range(self.projections.pxY) + if len(self.center_slice_list) > 1: + centers = [cen[0] for cen in self.center_slice_list] + slices = [cen[1] for cen in self.center_slice_list] + self.reg = linregress(slices, centers) + self.reg_centers = [ + self.reg.slope * x + self.reg.intercept for x in slice_column + ] + + elif len(self.center_slice_list) == 1: + self.reg = None + self.reg_centers = [self.current_center for i in slice_column] + if self.center_slice_list == []: + self.center_select.options = ["No centers set"] + self.reg_centers = ["None" for i in slice_column] + for x in self.buttons_to_disable: + x.disabled = True + 
self.all_centers_select.options = [ + f"{x}" + " : " + y for x, y in zip(slice_column, self.reg_centers) + ] + else: + self.center_select.options = [ + f"{x[1]} : {x[0]:0.1f}" for x in self.center_slice_list + ] + for x in self.buttons_to_disable: + x.disabled = False + self.all_centers_select.options = [ + f"{x} : {y:0.1f}" for x, y in zip(slice_column, self.reg_centers) + ] + self.viewer.update_tilted_center_line(self) + + def _set_observes(self): + self.center_textbox.observe(self._center_update, names="value") + self.center_guess_textbox.observe(self._center_guess_update, names="value") + self.load_rough_center.on_click(self._load_rough_center_onclick) + self.index_to_try_textbox.observe(self._index_to_try_update, names="value") + self.num_iter_textbox.observe(self._num_iter_update, names="value") + self.search_range_textbox.observe(self._search_range_update, names="value") + self.search_step_textbox.observe(self._search_step_update, names="value") + self.algorithms_dropdown.observe(self._update_algorithm, names="value") + self.filters_dropdown.observe(self._update_filters, names="value") + # Callback for index going to center + self.rec_viewer.image_index_slider.observe( + self._center_textbox_slider_update, names="value" + ) + self.viewer.slice_line_slider.observe(self._slice_slider_update, names="value") + self.use_ds_checkbox.observe(self._use_ds_data, names="value") + self.remove_center_button.on_click(self.remove_center) + + def find_center_on_click(self, *args): + """ + Callback to button for attempting to find center automatically using + `tomopy.recon.rotation.find_center`. Takes index_to_try and center_guess. + This method has worked better for me, if I use a good index_to_try + and center_guess. 
+ """ + prj_imgs, ds_value = self.get_ds_projections() + angles_rad = self.projections.angles_rad + self.current_center = find_center( + prj_imgs, + angles_rad, + ratio=0.9, + ind=self.index_to_try, + init=self.center_guess, + ) + self.center_textbox.value = self.current_center + + def find_center_vo_on_click(self, *args): + """ + Callback to button for attempting to find center automatically using + `tomopy.recon.rotation.find_center_vo`. Note: this method has not worked + well for me. + """ + prj_imgs, ds_value = self.get_ds_projections() + angles_rad = self.projections.angles_rad + try: + self.current_center = find_center_vo(prj_imgs, ncore=1) + self.center_textbox.value = self.current_center + except Excepton: + self.find_center_vo_button.warning() + + def find_center_manual_on_click(self, *args): + """ + Reconstructs at various centers when you click the button, and plots + the results with a slider so one can view. TODO: see X example. + Uses search_range, search_step, center_guess. 
+ Creates a :doc:`hyperslicer ` + + :doc:`histogram ` plot + """ + self.add_center_button.reset_state() + if self.rec_viewer.image_index_slider.disabled: + self.rec_viewer.image_index_slider.disabled = False + self.recon_slice = copy.deepcopy(self.index_to_try) + prj_imgs, ds_value = self.get_ds_projections() + angles_rad = self.projections.angles_rad + ds_factor = np.power(2, int(ds_value + 1)) + _center_guess = copy.deepcopy(self.center_guess) / ds_factor + _search_range = copy.deepcopy(self.search_range) / ds_factor + _search_step = copy.deepcopy(self.search_step) / ds_factor + _index_to_try = int(copy.deepcopy(self.index_to_try) / ds_factor) + + cen_range = [ + _center_guess - _search_range, + _center_guess + _search_range, + _search_step, + ] + # reconstruct, but also pull the centers used out to map to center + # textbox + self.rec, cen_range = write_center( + prj_imgs, + angles_rad, + cen_range=cen_range, + ind=_index_to_try, + mask=True, + algorithm=self.algorithm, + filter_name=self.filter, + num_iter=self.num_iter, + ) + self.cen_range = [ds_factor * cen for cen in cen_range] + if self.rec is None: + self.find_center_manual_button.warning() + return + self.rec_viewer.plot(self.rec) + self.add_center_button.button.disabled = False + + def get_ds_projections(self): + ds_value = self.viewer.ds_viewer_dropdown.value + if self.use_ds: + if ds_value == -1: + prj_imgs = self.projections.data + else: + self.projections._load_hdf_ds_data_into_memory(pyramid_level=ds_value) + prj_imgs = self.projections.data_ds + elif ds_value == -1: + prj_imgs = self.projections.data + else: + self.projections._load_hdf_normalized_data_into_memory() + prj_imgs = self.projections.data + return prj_imgs, ds_value + + def refresh_plots(self): + self.viewer.plot(self.projections, no_check=True) + self.reg = None + self._load_rough_center_onclick(None) + self.find_center_button.enable() + self.find_center_manual_button.enable() + self.find_center_vo_button.enable() + + def 
make_tab(self): + """ + Function to create a Center object's :doc:`Tab `. + """ + + # Accordion to find center automatically + self.automatic_center_vbox = VBox( + [ + HBox( + [self.find_center_button.button, self.find_center_vo_button.button], + layout=Layout(justify_content="center"), + ), + HBox( + [ + self.center_guess_textbox, + self.index_to_try_textbox, + ], + layout=Layout(justify_content="center"), + ), + ] + ) + self.automatic_center_accordion = Accordion( + children=[self.automatic_center_vbox], + selected_index=None, + titles=("Find center automatically",), + ) + + self.viewer_hbox = HBox( + [ + VBox( + [ + self.projections_plot_header, + self.viewer.app, + ], + layout=Layout(align_items="center"), + ), + VBox( + [ + self.reconstructions_plot_header, + self.rec_viewer.app, + ], + layout=Layout(align_items="center"), + ), + ], + layout=Layout(justify_content="center", align_items="center"), + ) + # Accordion to find center manually + self.manual_center_vbox = VBox( + [ + self.viewer_hbox, + HBox([self.center_textbox], layout=Layout(justify_content="center")), + HBox( + [ + self.find_center_manual_button.button, + self.add_center_button.button, + self.remove_center_button, + ], + layout=Layout(justify_content="center"), + ), + HBox( + [ + HBox( + [ + self.center_guess_textbox, + self.index_to_try_textbox, + self.num_iter_textbox, + self.search_range_textbox, + self.search_step_textbox, + self.algorithms_dropdown, + self.filters_dropdown, + self.use_ds_checkbox, + ], + layout=Layout( + width="50%", + flex_flow="row wrap", + justify_content="center", + align_items="center", + ), + ), + HBox( + [ + VBox( + [ + self.center_select_label, + self.center_select, + ], + layout=Layout(align_items="center"), + ), + VBox( + [ + self.all_centers_select_label, + self.all_centers_select, + ], + layout=Layout(align_items="center"), + ), + ], + ), + ] + ), + ], + layout=Layout(justify_content="center"), + ) + + self.manual_center_accordion = Accordion( + 
children=[self.manual_center_vbox], + selected_index=0, + titles=("Find center manually",), + ) + + self.tab = VBox( + [ + VBox( + [ + HBox( + [ + self.Import.switch_data_buttons, + self.load_rough_center, + ] + ), + ], + ), + self.manual_center_accordion, + self.automatic_center_accordion, + ] + ) diff --git a/tomopyui/widgets/dataexplorer.py b/tomopyui/widgets/dataexplorer.py new file mode 100644 index 0000000..c831119 --- /dev/null +++ b/tomopyui/widgets/dataexplorer.py @@ -0,0 +1,396 @@ +import pathlib +import numpy as np +import dxchange + +from ipywidgets import * +from ipyfilechooser import FileChooser +from abc import ABC, abstractmethod +from tomopyui.widgets.view import ( + BqImViewer_Projections_Parent, + BqImViewer_Projections_Child, +) +from tomopyui.backend.io import Projections_Prenormalized +from tomopyui.widgets.analysis import Align, Recon +from tomopyui._sharedvars import * + + +class DataExplorerTab: + def __init__(self, align, recon): + self.create_tab(align, recon) + + def create_tab(self, align, recon): + self.align = RecentAlignExplorer(align) + self.align.create_app() + self.recon = RecentReconExplorer(recon) + self.recon.create_app() + self.any = AnalysisExplorer() + self.any.create_app() + self.analysis_browser_accordion = Accordion( + children=[self.any.app], + selected_index=0, + titles=("Plot Any Analysis",), + ) + # self.recent_alignment_accordion = Accordion( + # children=[self.align.app], + # selected_index=None, + # titles=("Plot Recent Alignments",), + # ) + # self.recent_recon_accordion = Accordion( + # children=[self.recon.app], + # selected_index=None, + # titles=("Plot Recent Reconstructions",), + # ) + self.tab = VBox( + children=[ + self.analysis_browser_accordion, + # self.recent_alignment_accordion, + # self.recent_recon_accordion, + ] + ) + + +class DataExplorerBase(ABC): + def __init__(self): + self.metadata = None + self.viewer_initial = BqImViewer_Projections_Parent() + self.viewer_initial.create_app() + 
self.viewer_analyzed = BqImViewer_Projections_Child(self.viewer_initial) + self.viewer_analyzed.create_app() + self.projections = Projections_Prenormalized() + self.analyzed_projections = Projections_Prenormalized() + + @abstractmethod + def create_app(self): + ... + + +class AnalysisExplorer(DataExplorerBase): + def __init__(self): + super().__init__() + self.filebrowser = Filebrowser() + self.filebrowser.create_app() + self.filebrowser.load_data_button.on_click(self.load_data_from_filebrowser) + + def load_data_from_filebrowser(self, change): + self.filebrowser.load_data_button.icon = "fas fa-cog fa-spin fa-lg" + self.filebrowser.load_data_button.button_style = "info" + metadata = {} + self.projections.filedir = self.filebrowser.root_filedir + self.projections.data = np.load( + self.projections.filedir / "normalized_projections.npy" + ) + if ".npy" in self.filebrowser.selected_data_filepath.name: + self.analyzed_projections.data = np.load( + self.filebrowser.selected_data_filepath + ) + elif ".tif" in self.filebrowser.selected_data_filepath.name: + self.analyzed_projections.data = np.array( + dxchange.reader.read_tiff( + self.filebrowser.selected_data_filepath + ).astype(np.float32) + ) + self.analyzed_projections.filedir = ( + self.filebrowser.selected_data_filepath.parent + ) + self.projections._check_downsampled_data() + self.viewer_initial.plot(self.projections) + self.analyzed_projections._check_downsampled_data() + self.viewer_analyzed.plot(self.analyzed_projections) + self.filebrowser.load_data_button.icon = "fa-check-square" + self.filebrowser.load_data_button.button_style = "success" + + def create_app(self): + plots = HBox( + [self.viewer_initial.app, self.viewer_analyzed.app], + layout=Layout(justify_content="center"), + ) + self.app = VBox([self.filebrowser.app, plots]) + + +class RecentAnalysisExplorer(DataExplorerBase): + def __init__(self, analysis): + super().__init__() + self.load_run_list_button = Button( + icon="download", + 
button_style="info", + layout=Layout(width="auto"), + ) + self.load_run_list_button.on_click(self._load_run_list_on_click) + self.run_list_selector = Select( + options=[], + rows=5, + disabled=False, + style=extend_description_style, + layout=Layout(justify_content="center"), + ) + self.run_list_selector.observe(self.choose_file_to_plot, names="value") + + def _load_run_list_on_click(self, change): + self.load_run_list_button.button_style = "info" + self.load_run_list_button.icon = "fas fa-cog fa-spin fa-lg" + self.load_run_list_button.description = "Importing run list." + # creates a list from the keys in pythonic way + # from https://stackoverflow.com/questions/11399384/extract-all-keys-from-a-list-of-dictionaries + # don't know how it works + self.run_list_selector.options = list( + set().union(*(d.keys() for d in self.analysis.run_list)) + ) + self.load_run_list_button.button_style = "success" + self.load_run_list_button.icon = "fa-check-square" + self.load_run_list_button.description = "Finished importing run list." 
    def find_file_in_metadata(self, filedir):
        """
        Look up `filedir` in this session's run list and, if found, load the
        parent (pre-analysis) projections and the analyzed result for plotting.
        """
        for run in range(len(self.analysis.run_list)):
            if filedir in self.analysis.run_list[run]:
                metadata = {}
                # Parent data: re-load the projections that went into the run.
                metadata["filedir"] = self.analysis.run_list[run][filedir][
                    "parent_filedir"
                ]
                metadata["filename"] = self.analysis.run_list[run][filedir][
                    "parent_filename"
                ]
                metadata["angle_start"] = self.analysis.run_list[run][filedir][
                    "angle_start"
                ]
                metadata["angle_end"] = self.analysis.run_list[run][filedir][
                    "angle_end"
                ]
                # NOTE(review): `self.imagess`, `TomoData`, `self.obj`, and
                # `self._create_image_app` are not defined or imported anywhere
                # in this file — confirm they are provided elsewhere before
                # this path can run.
                self.imagess[0] = TomoData(metadata=metadata).prj_imgs
                # Analyzed data: point at the run's save directory and pick
                # the output filename by analysis type.
                metadata["filedir"] = self.analysis.run_list[run][filedir]["savedir"]
                if self.obj.widget_type == "Align":
                    metadata["filename"] = "projections_after_alignment.tif"
                else:
                    metadata["filename"] = "recon.tif"
                self.imagess[1] = TomoData(metadata=metadata).prj_imgs
                self._create_image_app()

    def choose_file_to_plot(self, change):
        # Observer for the run-list selector; fires when its value changes.
        self.find_file_in_metadata(change.new)

    def create_app(self):
        """Lay out the two viewers under the run-list controls."""
        plots = HBox(
            [self.viewer_initial.app, self.viewer_analyzed.app],
            layout=Layout(justify_content="center"),
        )
        self.app = VBox([self.load_run_list_button, self.run_list_selector, plots])


class RecentAlignExplorer(RecentAnalysisExplorer):
    # Explorer preconfigured for alignments run during this session.
    def __init__(self, align: Align):
        super().__init__(align)
        self.analysis = align
        self.run_list_selector.description = "Alignments:"
        self.load_run_list_button.description = "Load alignments from this session."
        self.create_app()


class RecentReconExplorer(RecentAnalysisExplorer):
    # Explorer preconfigured for reconstructions run during this session.
    def __init__(self, recon: Recon):
        super().__init__(recon)
        self.analysis = recon
        self.run_list_selector.description = "Reconstructions:"
        self.load_run_list_button.description = (
            "Load reconstructions from this session."
+ ) + self.create_app() + + +class Filebrowser: + def __init__(self): + + # parent directory filechooser + self.orig_data_fc = FileChooser() + self.orig_data_fc.show_only_dirs = True + self.orig_data_fc.register_callback(self.update_orig_data_folder) + self.fc_label = Label("Original Data", layout=Layout(justify_content="Center")) + self.quick_path_search = Textarea( + placeholder=r"Z:\swelborn\your\folder\with\normalized\projections", + style=extend_description_style, + disabled=False, + layout=Layout(align_items="stretch"), + ) + self.quick_path_search.observe( + self.update_filechooser_from_quicksearch, names="value" + ) + self.quick_path_label = Label("Quick path search") + + # subdirectory selector + self.subdir_list = [] + self.subdir_label = Label( + "Analysis Directories", layout=Layout(justify_content="Center") + ) + self.subdir_selector = Select(options=self.subdir_list, rows=5, disabled=False) + self.subdir_selector.observe(self.populate_methods_list, names="value") + self.selected_subdir = None + + # method selector + self.methods_list = [] + self.methods_label = Label("Methods", layout=Layout(justify_content="Center")) + self.methods_selector = Select( + options=self.methods_list, rows=5, disabled=False + ) + self.methods_selector.observe(self.populate_data_list, names="value") + self.selected_method = None + + # data selector + self.data_list = [] + self.data_label = Label("Data", layout=Layout(justify_content="Center")) + self.data_selector = Select(options=self.data_list, rows=5, disabled=False) + self.data_selector.observe(self.set_data_filename, names="value") + self.allowed_extensions = (".npy", ".tif", ".tiff") + self.options_metadata_table_output = Output() + + # load data button + self.load_data_button = Button( + icon="upload", + style={"font_size": "35px"}, + button_style="info", + layout=Layout(width="75px", height="86px"), + ) + + def _init_lists(self): + self.data_list = [] + self.selected_data_filename = None + self.selected_data_ftype = 
None + self.selected_subdir = None + self.methods_list = [] + self.selected_method = None + self.subdir_list = [] + self.selected_analysis_type = None + self.populate_subdirs_list() + + def update_filechooser_from_quicksearch(self, change): + path = pathlib.Path(change.new) + try: + self.orig_data_fc.reset(path=path) + except Exception as e: + with self.options_metadata_table_output: + self.options_metadata_table_output.clear_output(wait=True) + print(f"{e}") + return + else: + self.root_filedir = path + self._init_lists() + + def update_orig_data_folder(self): + self.root_filedir = pathlib.Path(self.orig_data_fc.selected_path) + self.quick_path_search.value = str(self.root_filedir) + + def populate_subdirs_list(self): + self.subdir_list = [ + pathlib.Path(f) for f in os.scandir(self.root_filedir) if f.is_dir() + ] + self.subdir_list = [ + subdir.parts[-1] + for subdir in self.subdir_list + if any(x in subdir.parts[-1] for x in ("-align", "-recon")) + ] + if self.subdir_list != []: + self.subdir_selector.options = self.subdir_list + self.subdir_selector.value = self.subdir_selector.options[0] + self.populate_methods_list() + else: + self.data_selector.options = [] + self.methods_selector.options = [] + self.subdir_selector.options = [] + + def populate_methods_list(self, *args): + if self.subdir_selector.options != tuple(): + self.selected_subdir = ( + pathlib.Path(self.root_filedir) / self.subdir_selector.value + ) + self.methods_list = [ + pathlib.Path(f) for f in os.scandir(self.selected_subdir) if f.is_dir() + ] + self.methods_list = [ + subdir.parts[-1] + for subdir in self.methods_list + if not any(x in subdir.parts[-1] for x in ("-align", "-recon")) + ] + if self.methods_list != []: + self.methods_selector.options = self.methods_list + self.methods_selector.value = self.methods_list[0] + self.populate_data_list() + else: + self.data_selector.options = [] + self.methods_selector.options = [] + + def populate_data_list(self, *args): + if 
self.methods_selector.options != tuple(): + self.selected_method = ( + pathlib.Path(self.root_filedir) + / self.selected_subdir + / self.methods_selector.value + ) + self.file_list = [ + pathlib.Path(f) + for f in os.scandir(self.selected_method) + if not f.is_dir() + ] + self.data_list = [ + file.name + for file in self.file_list + if any(x in file.name for x in self.allowed_extensions) + ] + if self.data_list != []: + self.data_selector.options = self.data_list + self.load_metadata() + else: + self.data_selector.options = [] + + def set_data_filename(self, change): + if self.data_selector.options != tuple(): + self.selected_data_filename = change.new + self.selected_data_filepath = ( + self.selected_method / self.selected_data_filename + ) + self.selected_data_ftype = pathlib.Path(self.selected_data_filename).suffix + if "recon" in pathlib.Path(self.selected_subdir).name: + self.selected_analysis_type = "recon" + elif "align" in pathlib.Path(self.selected_subdir).name: + self.selected_analysis_type = "align" + + def load_metadata(self): + self.imported_metadata = False + self.metadata_file = [ + self.selected_method / file.name + for file in self.file_list + if "recon_metadata.json" in file.name or "align_metadata.json" in file.name + ] + + if self.metadata_file != []: + self.metadata = load_metadata(filepath=self.metadata_file[0]) + self.options_table = metadata_to_DataFrame(self.metadata) + with self.options_metadata_table_output: + self.options_metadata_table_output.clear_output(wait=True) + display(self.options_table) + self.imported_metadata = True + + def create_app(self): + quickpath = VBox( + [ + self.quick_path_label, + self.quick_path_search, + ], + layout=Layout(align_items="center"), + ) + fc = VBox([self.fc_label, self.orig_data_fc]) + subdir = VBox([self.subdir_label, self.subdir_selector]) + methods = VBox([self.methods_label, self.methods_selector]) + data = VBox([self.data_label, self.data_selector]) + button = VBox( + [ + Label("Upload", 
layout=Layout(justify_content="center")), + self.load_data_button, + ] + ) + top_hb = HBox( + [fc, subdir, methods, data, button], + layout=Layout(justify_content="center"), + align_items="stretch", + ) + box = VBox( + [quickpath, top_hb, self.options_metadata_table_output], + layout=Layout(justify_content="center", align_items="center"), + ) + self.app = box diff --git a/tomopyui/widgets/helpers.py b/tomopyui/widgets/helpers.py new file mode 100644 index 0000000..1cceb8c --- /dev/null +++ b/tomopyui/widgets/helpers.py @@ -0,0 +1,370 @@ +import os +import glob +import numpy as np +import json +import functools +import tifffile as tf +import asyncio +import logging +import ipywidgets as widgets +import importlib.util +import sys +import time + +from ipywidgets import * +from abc import ABC, abstractmethod + + +def import_module_set_env(import_dict): + """ + https://stackoverflow.com/questions/1051254/check-if-python-package-is-installed + + Safely imports a module or package and sets an environment variable if it + imports (or is already imported). This is used in the main function for + checking whether or not `cupy` is installed. If it is not installed, then + options for cuda-enabled functions will be greyed out. 
+ """ + for key in import_dict: + if key in sys.modules: + os.environ[import_dict[key]] = "True" + pass + elif (spec := importlib.util.find_spec(key)) is not None: + module = importlib.util.module_from_spec(spec) + sys.modules[key] = module + spec.loader.exec_module(module) + os.environ[import_dict[key]] = "True" + else: + os.environ[import_dict[key]] = "False" + pass + + +# From ipywidgets readthedocs +class OutputWidgetHandler(logging.Handler): + """Custom logging handler sending logs to an output widget""" + + def __init__(self, *args, **kwargs): + super(OutputWidgetHandler, self).__init__(*args, **kwargs) + layout = {"width": "100%", "height": "160px", "border": "1px solid black"} + self.out = Output(layout=layout) + + def emit(self, record): + """Overload of logging.Handler method""" + formatted_record = self.format(record) + new_output = { + "name": "stdout", + "output_type": "stream", + "text": formatted_record + "\n", + } + self.out.outputs = (new_output,) + self.out.outputs + + def show_logs(self): + """Show the logs""" + display(self.out) + + def clear_logs(self): + """Clear the current logs""" + self.out.clear_output() + + +def return_handler(logger, logging_level=None): + handler = OutputWidgetHandler() + handler.setFormatter( + logging.Formatter("%(asctime)s - [%(levelname)s] %(message)s") + ) + # handler.show_logs() + logger.addHandler(handler) + logger.setLevel(logging_level) # log at info level. 
+ return handler, logger + + +class MetaCheckbox: + def __init__(self, description, dictionary, obj, disabled=False, value=False): + + self.checkbox = Checkbox( + description=description, value=value, disabled=disabled + ) + + def create_opt_dict_on_check(change): + dictionary[description] = change.new + obj.set_metadata() # obj needs a set_metadata function + + self.checkbox.observe(create_opt_dict_on_check, names="value") + + +def create_checkbox(description, disabled=False, value=False): + checkbox = Checkbox(description=description, disabled=disabled, value=value) + return checkbox + + +def create_checkboxes_from_opt_list(opt_list, dictionary, obj): + checkboxes = [MetaCheckbox(opt, dictionary, obj) for opt in opt_list] + return [a.checkbox for a in checkboxes] # return list of checkboxes + + +def set_checkbox_bool(checkbox_list, dictionary, obj): + def create_opt_dict_on_check(change): + dictionary[change.owner.description] = change.new + obj.set_metadata() # obj needs a set_metadata function + + for key in dictionary: + if dictionary[key]: + for checkbox in checkbox_list: + if checkbox.description == str(key): + checkbox.value = True + checkbox.observe(create_opt_dict_on_check, names="value") + elif not dictionary[key]: + for checkbox in checkbox_list: + if checkbox.description == str(key): + checkbox.value = False + checkbox.observe(create_opt_dict_on_check, names="value") + return checkbox_list + + +class Timer: + def __init__(self, timeout, callback): + self._timeout = timeout + self._callback = callback + + async def _job(self): + await asyncio.sleep(self._timeout) + self._callback() + + def start(self): + self._task = asyncio.ensure_future(self._job()) + + def cancel(self): + self._task.cancel() + + +def debounce(wait): + """Decorator that will postpone a function's + execution until after `wait` seconds + have elapsed since the last time it was invoked.""" + + def decorator(fn): + timer = None + + def debounced(*args, **kwargs): + nonlocal timer + + def 
call_it(): + fn(*args, **kwargs) + + if timer is not None: + timer.cancel() + timer = Timer(wait, call_it) + timer.start() + + return debounced + + return decorator + + +class ReactiveButtonBase(ABC): + """ + Base class for a reactive button. + + Parameters + ---------- + description: str + Button description, initial. + description_during: str + Button description, during reaction. + description_after: str + Button description, during reaction. + icon: str + FontAwesome icon, initial. + icon_during: str + FontAwesome icon, during reaction. + icon_after: str + FontAwesome icon, after reaction. + button_style: str + Changes button color, initial. Can be "info", "success", "", "warning", + or "danger" + button_style_during: str + Changes button color, during callback. Can be "info", "success", "", "warning", + or "danger" + button_style_after: str + Changes button color, after callback. Can be "info", "success", "", "warning", + or "danger" + """ + + def __init__( + self, + callback, + description="", + description_during="", + description_after="", + icon="", + icon_during="fas fa-cog fa-spin fa-lg", + icon_after="fa-check-square", + button_style="", + button_style_during="info", + button_style_after="success", + style=None, + disabled=False, + layout=None, + tooltip=None, + warning="That button click didn't work.", + ): + self.button = Button() + self.callback = callback + self.description = description + self.description_during = description_during + self.description_after = description_after + self.button_style = button_style + self.button_style_during = button_style_during + self.button_style_after = button_style_after + self.icon = icon + self.icon_during = icon_during + self.icon_after = icon_after + self.disabled = disabled + self.button_style_warning = "warning" + self.layout = layout + self.style = style + self.tooltip = tooltip + self.warning = warning + self.reset_state() + self.button.on_click(self.run_callback) + + def reset_state(self): + 
self.button.description = self.description + self.button.button_style = self.button_style + self.button.icon = self.icon + self.button.disabled = self.disabled + self.button.layout = self.layout + self.button.tooltip = self.tooltip + if self.style is not None: + self.button.style = self.style + if self.layout is not None: + self.button.layout = self.layout + + def switch_disabled(self): + if self.disabled: + self.disabled = False + self.button.disabled = False + else: + self.disabled = True + self.button.disabled = True + + def disable(self): + self.reset_state() + self.button.disabled = True + + def enable(self): + self.reset_state() + self.button.disabled = False + + def warning(self, *args): + self.button.button_style = self.button_style_warning + self.button.description = self.warning + self.button.icon = "exclamation-triangle" + + @abstractmethod + def run_callback(self, *args): + ... + + +class ReactiveTextButton(ReactiveButtonBase): + def __init__( + self, + callback, + description, + description_during, + description_after, + warning="That button didn't work.", + layout=Layout(width="auto", height="auto", align_items="stretch"), + ): + super().__init__( + callback, + description=description, + description_during=description_during, + description_after=description_after, + layout=layout, + warning=warning, + ) + + def run_callback(self, *args): + self.button.button_style = self.button_style_during + self.button.icon = self.icon_during + self.button.description = self.description_during + self.callback() + self.button.button_style = self.button_style_after + self.button.icon = self.icon_after + self.button.description = self.description_after + + +class ReactiveIconButton(ReactiveButtonBase): + def __init__(self, callback, icon, tooltip, skip_during=False, *args, **kwargs): + self.skip_during = skip_during + super().__init__(callback, icon=icon, tooltip=tooltip, *args, **kwargs) + + def run_callback(self, *args): + if not self.skip_during: + 
self.button.button_style = self.button_style_during + self.button.icon = self.icon_during + self.callback() + self.button.button_style = self.button_style_after + self.button.icon = self.icon_after + + +class SwitchOffOnIconButton(ReactiveButtonBase): + """ + Subclass for buttons that turn off and on (green on, grey off). + """ + + def __init__(self, callback_on, callback_off, icon): + super().__init__(None, icon=icon, icon_during=icon, icon_after=icon) + self.callback_on = callback_on + self.callback_off = callback_off + self.button_on = False + + def run_callback(self, *args): + if self.button_on: + self.callback_off() + self.button.button_style = "" + self.button_on = False + else: + self.callback_on() + self.button.button_style = "success" + self.button_on = True + + +class ImportButton(ReactiveButtonBase): + """ + Import button found throughout the app. + """ + + def __init__(self, callback): + super().__init__( + callback, + icon="upload", + tooltip="Load your data into memory", + style={"font_size": "35px"}, + layout=Layout(width="75px", height="86px"), + disabled=True, + ) + self.button_on = False + + def run_callback(self, *args): + self.button.button_style = self.button_style_during + self.button.icon = self.icon_during + self.button.description = self.description_during + self.callback() + self.button.button_style = self.button_style_after + self.button.icon = self.icon_after + self.button.description = self.description_after + + def switch_disabled(self): + super().switch_disabled() + if self.disabled: + self.button.button_style = "info" + else: + self.button.button_style = "" + + def enable(self): + self.disabled = False + self.button.button_style = self.button_style + self.button.disabled = False + self.button.icon = self.icon + self.button.button_style = self.button_style_during diff --git a/tomopyui/widgets/imports.py b/tomopyui/widgets/imports.py new file mode 100644 index 0000000..9f0a616 --- /dev/null +++ b/tomopyui/widgets/imports.py @@ -0,0 
+1,1696 @@ +import time +import logging +import numpy as np +import pathlib +import functools +import re +import os +import json +import tifffile as tf +import copy +import h5py + +from ipyfilechooser import FileChooser +from ipyfilechooser.errors import InvalidPathError, InvalidFileNameError +from ipywidgets import * +from abc import ABC, abstractmethod +from tomopyui._sharedvars import * +from tomopyui.widgets.view import BqImViewer_Projections_Parent +from tomopyui.backend.io import ( + RawProjectionsHDF5_ALS832, + RawProjectionsHDF5_APS, + RawProjectionsXRM_SSRL62C, + Projections_Prenormalized, + Metadata_Align, + Metadata, + Metadata_ALS_832_Raw, + Metadata_ALS_832_Prenorm, + Metadata_APS_Raw, + Metadata_APS_Prenorm, + Metadata_General_Prenorm, + RawProjectionsTiff_SSRL62B, +) +from tomopyui.widgets import helpers +from tomopyui.widgets.helpers import ( + ReactiveTextButton, + ReactiveIconButton, + SwitchOffOnIconButton, + ImportButton, +) + + +class ImportBase(ABC): + """ + An overarching class that controls the rest of the processing pipeline. + Holds `Uploader` instances, which can be used for uploading data in the form of + `Projections` instances. The prenorm_uploader is general, in that it can be used for + any type of data. This is why it is in the base class. The subclasses of ImportBase + are for creating and holding raw `Uploader` instances. + """ + + def __init__(self): + + # Init raw/prenorm button switches. These are at the top of each of the Prep, + # Alignment, and Recon tabs to switch between using raw/uploaded data or + # prenormalized data. See the `ReactiveButton` helper class. 
+ self.use_raw_button = ReactiveTextButton( + self.enable_raw, + "Click to use raw/normalized data from the Import tab.", + "Updating plots.", + "Raw/normalized data from Import tab in use for alignment/reconstruction.", + ) + self.use_prenorm_button = ReactiveTextButton( + self.enable_prenorm, + "Click to use prenormalized data from the Import tab.", + "Updating plots.", + "Prenormalized data from Import tab in use for alignment/reconstruction.", + ) + + # Creates the prenormalized uploader (general) + # raw_uploader is created in the beamline-specific subclasses. + # Initializes to setting the `Import` instance's projections to be the prenorm + # projections, but this is switched with the "enable_raw" or "disable_raw" + # functions and buttons, defined above. Maybe I am getting terminology incorrect + # but this is kind of like a switchable singleton. + self.prenorm_uploader = PrenormUploader(self) + self.projections = self.prenorm_uploader.projections + self.uploader = self.prenorm_uploader + + # Init logger to be used throughout the app. + # TODO: This does not need to be under Import. + self.log = logging.getLogger(__name__) + self.log_handler, self.log = helpers.return_handler(self.log, logging_level=20) + + def enable_prenorm(self, *args): + """ + Makes the prenorm_uploader projections the projections used throughout the app. + Refreshes plots in the other tabs to match these. 
+ """ + self.use_raw_button.reset_state() + self.use_raw = False + self.use_prenorm = True + if self.raw_uploader.projections.hdf_file is not None: + self.raw_uploader.projections._close_hdf_file() + self.projections = self.prenorm_uploader.projections + if self.projections.hdf_file is not None: + self.projections._open_hdf_file_read_only() + self.projections._load_hdf_ds_data_into_memory() + # self.projections._check_downsampled_data() + self.uploader = self.prenorm_uploader + self.Prep.projections = self.projections + self.Center.projections = self.projections + self.Recon.projections = self.projections + self.Align.projections = self.projections + self.Recon.refresh_plots() + self.Align.refresh_plots() + self.Center.refresh_plots() + self.Prep.refresh_plots() + self.projections._close_hdf_file() + + def enable_raw(self, *args): + """ + Makes the raw_uploader projections the projections used throughout the app. + Refreshes plots in the other tabs to match these. + """ + self.use_prenorm_button.reset_state() + self.use_raw = True + self.use_prenorm = False + if self.prenorm_uploader.projections.hdf_file is not None: + self.prenorm_uploader.projections._close_hdf_file() + self.projections = self.raw_uploader.projections + if self.projections.hdf_file is not None: + self.projections._open_hdf_file_read_only() + self.projections._load_hdf_ds_data_into_memory() + self.projections._check_downsampled_data() + self.uploader = self.raw_uploader + self.Prep.projections = self.projections + self.Center.projections = self.projections + self.Recon.projections = self.projections + self.Align.projections = self.projections + self.Recon.refresh_plots() + self.Align.refresh_plots() + self.Center.refresh_plots() + self.Prep.refresh_plots() + self.projections._close_hdf_file() + + @abstractmethod + def make_tab(self): + ... 
+ + +class Import_SSRL62B(ImportBase): + """""" + + def __init__(self): + super().__init__() + self.angles_from_filenames = True + self.raw_uploader = RawUploader_SSRL62B(self) + self.make_tab() + + def make_tab(self): + + self.switch_data_buttons = HBox( + [self.use_raw_button.button, self.use_prenorm_button.button], + layout=Layout(justify_content="center"), + ) + + # raw_import = HBox([item for sublist in raw_import for item in sublist]) + self.raw_accordion = Accordion( + children=[ + VBox( + [ + HBox( + [self.raw_uploader.metadata_table_output], + layout=Layout(justify_content="center"), + ), + HBox( + [self.raw_uploader.progress_output], + layout=Layout(justify_content="center"), + ), + self.raw_uploader.app, + ] + ), + ], + selected_index=None, + titles=("Import and Normalize Raw Data",), + ) + + self.prenorm_accordion = Accordion( + children=[ + VBox( + [ + HBox( + [self.prenorm_uploader.metadata_table_output], + layout=Layout(justify_content="center"), + ), + self.prenorm_uploader.app, + ] + ), + ], + selected_index=None, + titles=("Import Prenormalized Data",), + ) + + self.tab = VBox( + [ + self.raw_accordion, + self.prenorm_accordion, + ] + ) + + +class Import_SSRL62C(ImportBase): + """""" + + def __init__(self): + super().__init__() + self.angles_from_filenames = True + self.raw_uploader = RawUploader_SSRL62C(self) + self.make_tab() + + def make_tab(self): + + self.switch_data_buttons = HBox( + [self.use_raw_button.button, self.use_prenorm_button.button], + layout=Layout(justify_content="center"), + ) + + # raw_import = HBox([item for sublist in raw_import for item in sublist]) + self.raw_accordion = Accordion( + children=[ + VBox( + [ + HBox( + [self.raw_uploader.metadata_table_output], + layout=Layout(justify_content="center"), + ), + HBox( + [self.raw_uploader.progress_output], + layout=Layout(justify_content="center"), + ), + self.raw_uploader.app, + ] + ), + ], + selected_index=None, + titles=("Import and Normalize Raw Data",), + ) + + 
self.prenorm_accordion = Accordion( + children=[ + VBox( + [ + HBox( + [self.prenorm_uploader.metadata_table_output], + layout=Layout(justify_content="center"), + ), + self.prenorm_uploader.app, + ] + ), + ], + selected_index=None, + titles=("Import Prenormalized Data",), + ) + + self.tab = VBox( + [ + # self.switch_data_buttons, + self.raw_accordion, + self.prenorm_accordion, + ] + ) + + +class Import_ALS832(ImportBase): + """""" + + def __init__(self): + super().__init__() + self.raw_uploader = RawUploader_ALS832(self) + self.make_tab() + + def make_tab(self): + + self.switch_data_buttons = HBox( + [self.use_raw_button.button, self.use_prenorm_button.button], + layout=Layout(justify_content="center"), + ) + + # raw_import = HBox([item for sublist in raw_import for item in sublist]) + self.raw_accordion = Accordion( + children=[ + VBox( + [ + HBox( + [self.raw_uploader.metadata_table_output], + layout=Layout(justify_content="center"), + ), + HBox( + [self.raw_uploader.progress_output], + layout=Layout(justify_content="center"), + ), + self.raw_uploader.app, + ] + ), + ], + selected_index=None, + titles=("Import and Normalize Raw Data",), + ) + + self.prenorm_accordion = Accordion( + children=[ + VBox( + [ + HBox( + [self.prenorm_uploader.metadata_table_output], + layout=Layout(justify_content="center"), + ), + self.prenorm_uploader.app, + ] + ), + ], + selected_index=None, + titles=("Import Prenormalized Data",), + ) + + self.tab = VBox( + [ + self.raw_accordion, + self.prenorm_accordion, + ] + ) + + +class Import_APS(Import_ALS832): + def __init__(self): + super().__init__() + self.raw_uploader = RawUploader_APS(self) + self.make_tab() + + +class UploaderBase(ABC): + """""" + + def __init__(self): + # Headers style, make it look halfway decent. 
+ self.header_font_style = { + "font_size": "22px", + "font_weight": "bold", + "font_variant": "small-caps", + # "text_color": "#0F52BA", + } + + # File browser + self.filechooser = FileChooser() + self.filechooser.register_callback(self._update_quicksearch_from_filechooser) + self.file_chooser_label = Label( + "Find data folder", style=self.header_font_style + ) + self.filedir = pathlib.Path() + self.filename = pathlib.Path() + + # Quick path search textbox + self.quick_path_search = Textarea( + placeholder=r"Z:\swelborn", + style=extend_description_style, + disabled=False, + layout=Layout(align_items="stretch"), + ) + self.quick_path_search.observe( + self._update_filechooser_from_quicksearch, names="value" + ) + self.quick_path_label = Label("Quick path search:") + + # Import button, disabled before you put anything into the quick path + # see helpers class + self.import_button = ImportButton(self.import_data) + + # Where metadata will be displayed + self.metadata_table_output = Output() + + # Progress bar showing upload progress + self.progress_output = Output() + + # Save tiff checkbox + self.save_tiff_on_import_checkbox = Checkbox( + description="Save .tif on import.", + value=False, + style=extend_description_style, + disabled=False, + ) + + # Create data visualizer + self.viewer = BqImViewer_Projections_Parent() + self.viewer.create_app() + + # bool for whether or not metadata was imported + self.imported_metadata = False + + # Will update based on the import status + self.import_status_label = Label(layout=Layout(justify_content="center")) + + # Will update when searching for metadata + self.find_metadata_status_label = Label(layout=Layout(justify_content="center")) + + def check_filepath_exists(self, path): + self.filename = None + self.filedir = None + if path.is_dir(): + self.filedir = path + self.filechooser.reset(path=path) + elif path.is_file(): + self.filedir = path.parent + self.filename = str(path.name) + self.filechooser.reset(path=path.parent, 
                filename=path.name)
        else:
            # Path exists as neither a file nor a directory: report the problem
            # and signal failure to the caller.
            self.find_metadata_status_label.value = (
                "No file or directory with that name."
            )
            return False
        return True

    def _update_filechooser_from_quicksearch(self, change):
        """
        Callback for the quick search textbox.

        Checks path to see if it exists, checks file directory for strings in
        self.filetypes_to_look_for. Then it runs the subclass-specific function
        self.update_filechooser_from_quicksearch.

        Parameters
        ----------
        change
            Traitlets change object from the quick search textbox observer;
            ``change.new`` is the newly entered path as a str.
        """
        path = pathlib.Path(change.new)
        # Disable import until we have confirmed there is something importable.
        self.import_button.disable()
        self.imported_metadata = False
        if not self.check_filepath_exists(path):
            return
        # Show the status label in the metadata output area while searching.
        with self.metadata_table_output:
            self.metadata_table_output.clear_output(wait=True)
            display(self.find_metadata_status_label)
        try:
            found_files = self.projections._file_finder(
                self.filedir, self.filetypes_to_look_for
            )
            # Treat "no matching files" as a failed search.
            assert found_files != []
        except AssertionError:
            # Build a human-readable "x or y or z" list of the wanted filetypes.
            filetype_str = [x + " or " for x in self.filetypes_to_look_for[:-1]]
            filetype_str = "".join(filetype_str + [self.filetypes_to_look_for[-1]])
            self.find_metadata_status_label.value = (
                "No "
                + filetype_str
                + " files found in this directory. "
                + self.files_not_found_str
            )
            self.files_found = False
        else:
            # calls subclass method.
            self.files_found = True
            self.update_filechooser_from_quicksearch(found_files)

    def _update_quicksearch_from_filechooser(self):
        """
        Updates the quick search box after selection from the file chooser. This
        triggers self._update_filechooser_from_quicksearch(), so not much logic is
        needed other than setting the filedirectory and filename.
+ """ + self.filedir = pathlib.Path(self.filechooser.selected_path) + self.filename = self.filechooser.selected_filename + self.quick_path_search.value = str(self.filedir / self.filename) + + # Each uploader has a method to update the filechooser from the quick search path, + # and vice versa. + @abstractmethod + def update_filechooser_from_quicksearch(self, change): + ... + + # Each uploader has a method to import data given the filepath chosen in the + # filechooser/quicksearch box + @abstractmethod + def import_data(self): + ... + + +class PrenormUploader(UploaderBase): + """""" + + def __init__(self, Import): + super().__init__() + + # store parent Import instance for changing "use_raw" or "use_prenorm" buttons + # after uploading data. + self.Import = Import + self.metadatas = None + self.projections = Projections_Prenormalized() # see io.py in backend + self.filechooser.title = "Import prenormalized data:" + self.viewer.rectangle_selector_on = False # TODO: Remove? + + # Quick search/filechooser will look for these types of files. + self.filetypes_to_look_for = [".json", ".npy", ".tif", ".tiff", ".hdf5", ".h5"] + self.files_not_found_str = "" + self.filetypes_to_look_for_images = [".npy", ".tif", ".tiff", ".hdf5", ".h5"] + + # Create widgets for required data entry if the prenorm data does not have + # proper metadata to run with the rest of the program. 
For ex, these boxes will + # pop up if trying to import a normalized tiff stack from another program + self.metadata_input_output = Output() + self.metadata_input_output_label = Label( + "Set Metadata Here", + layout=Layout(justify_content="center"), + style=self.header_font_style, + ) + self.start_angle_textbox = FloatText( + value=-90, + description="Starting angle (\u00b0): ", + disabled=True, + style=extend_description_style, + ) + self.angle_end_textbox = FloatText( + value=90, + description="Ending angle (\u00b0): ", + disabled=True, + style=extend_description_style, + ) + self.px_size_textbox = FloatText( + value=30, + description="Pixel size (binning 1): ", + disabled=True, + style=extend_description_style, + ) + self.px_size_units_dropdown_opts = ["nm", "\u00b5m", "mm", "cm"] + self.px_size_units_dropdown = Dropdown( + value="nm", + options=self.px_size_units_dropdown_opts, + disabled=True, + style=extend_description_style, + layout=Layout(width="auto"), + ) + self.energy_textbox = FloatText( + value=8000, + description="Energy: ", + disabled=True, + style=extend_description_style, + ) + self.energy_units_dropdown = Dropdown( + value="eV", + options=["eV", "keV"], + disabled=True, + style=extend_description_style, + layout=Layout(width="auto"), + ) + self.binning_dropdown = Dropdown( + value=2, + description="Binning: ", + options=[("1", 1), ("2", 2), ("4", 4)], + disabled=True, + style=extend_description_style, + layout=Layout(width="auto"), + ) + self.angular_resolution_textbox = FloatText( + value=0.25, + description="Angular Resolution (\u00b0):", + disabled=True, + style=extend_description_style, + ) + + # Collection of widgets (list) to enable or disable, depending on whether or not + # metadata could be imported when choosing a file or file directory + self.required_parameters = [ + "start_angle", + "end_angle", + "pixel_size", + "pixel_units", + "energy_float", + "energy_units", + "binning", + "angular_resolution", + ] + self.init_required_values = 
[-90, 90, 30, "nm", 8000, "eV", 2, 0.25] + self.widgets_to_enable = [ + self.start_angle_textbox, + self.angle_end_textbox, + self.px_size_textbox, + self.px_size_units_dropdown, + self.energy_textbox, + self.energy_units_dropdown, + self.binning_dropdown, + self.angular_resolution_textbox, + ] + self.required_metadata = zip(self.required_parameters, self.widgets_to_enable) + + # Creating callbacks programatically like this required due to namespace + # issues. Could make this into a metadata widget class in the future. TODO + for name, widget in self.required_metadata: + widget.observe(self.create_metadata_callback(name, widget)) + + # Selection widget for tifffs and npys in a folder + self.images_in_dir_select = Select( + options=[], + disabled=False, + ) + self.images_in_dir_select.observe(self.images_in_dir_callback, names="index") + + # If there is many tiffs in the folder, turn this checkbox on + self.tiff_folder_checkbox = Checkbox( + description="Tiff Folder?", + style=extend_description_style, + value=False, + disabled=True, + ) + + self.tiff_folder_checkbox.observe(self.tiff_folder_on, names="value") + + self.metadata_widget_box = VBox( + [ + self.metadata_input_output_label, + self.start_angle_textbox, + self.angle_end_textbox, + self.angular_resolution_textbox, + HBox( + [ + self.px_size_textbox, + self.px_size_units_dropdown, + self.binning_dropdown, + ] + ), + HBox([self.energy_textbox, self.energy_units_dropdown]), + ] + ) + # Creates the app that goes into the Import object + self.create_app() + + def create_and_display_metadata_tables(self): + """ + Creates metadata dataframe and displays it in self.metadata_table_output. 
+ """ + # [ + # metadata.set_metadata(self.projections) + # for metadata in self.projections.metadatas + # ] + [metadata.create_metadata_box() for metadata in self.projections.metadatas] + self.metadata_vboxes = [x.metadata_vbox for x in self.projections.metadatas] + if not self.metadata_already_displayed: + with self.metadata_table_output: + self.metadata_table_output.clear_output(wait=True) + [display(m) for m in self.metadata_vboxes] + + def tiff_folder_on(self, change): + """ + Turns on the tiff folder option, where it will try to load all the tiffs in the + folder (a tiff sequence). Some redundancies + """ + if change.new: + self.projections.metadata.metadata["tiff_folder"] = True + self.projections.tiff_folder = True + self.tiff_folder = True + self.projections.metadata.metadata["pxZ"] = self.tiff_count_in_folder + self.projections.metadata.metadata["pxX"] = self.image_size_list[0][2] + self.projections.metadata.metadata["pxY"] = self.image_size_list[0][1] + self.import_button.enable() + self.images_in_dir_select.disabled = True + self.create_and_display_metadata_tables() + if not change.new: + if self.images_in_dir_select.index is not None: + self.import_button.enable() + else: + self.import_button.disable() + self.images_in_dir_select.disabled = False + self.projections.metadata.metadata["tiff_folder"] = False + self.projections.tiff_folder = False + self.tiff_folder = False + self.images_in_dir_callback(None, from_select=False) + + def images_in_dir_callback(self, change, from_select=True): + """ + Callback for the image selection widget. Displays updated metadata table with + image pixel sizes if you select a tiff or npy. + + Parameters + ---------- + from_select: bool + If this is false, ind will be the current selected index. Needed for + calling back from self.tiff_folder_on. 
+ """ + if not from_select: + ind = self.images_in_dir_select.index + else: + ind = change.new + if ind is not None: + self.projections.metadata.metadata["pxX"] = self.image_size_list[ind][2] + self.projections.metadata.metadata["pxY"] = self.image_size_list[ind][1] + self.projections.metadata.metadata["pxZ"] = self.image_size_list[ind][0] + self.projections.filedir = self.filedir + self.filename = str(self.images_in_dir[ind].name) + self.projections.filename = str(self.images_in_dir[ind].name) + self.create_and_display_metadata_tables() + self.import_button.enable() + else: + self.import_button.disable() + + def reset_required_widgets(self): + """ + Resets metadata widgets to default values. This also sets the metadata dict + values (widget.value = val triggers metadata setting callbacks) + """ + for name, val, widget in zip( + self.required_parameters, self.init_required_values, self.widgets_to_enable + ): + if name not in self.projections.metadata.metadata: + widget.value = val + + def create_metadata_callback(self, name, widget): + """ + Callback for setting metadata. Creates metadata table and displays it if all + required metadata are in self.projections.metadata.metadata + """ + + def callback(change): + if not self.imported_metadata: + self.projections.metadata.metadata[name] = widget.value + if all( + x in self.projections.metadata.metadata + for x in self.required_parameters + ): + self.create_and_display_metadata_tables() + + return callback + + # this was copied from PrenormalizedProjections get_img_shape. + # TODO: find better place. helper functions? 
+ def extract_image_sizes(self, image_list): + size_list = [] + + self.tiff_count_in_folder = len( + [file for file in image_list if file.suffix in [".tiff", ".tif"]] + ) + for image in image_list: + if image.suffix == ".tif" or image.suffix == ".tiff": + with tf.TiffFile(image) as tif: + # if you select a file instead of a file path, it will try to + # bring in the full filedir + if self.tiff_count_in_folder > 1: + self.tiff_folder_checkbox.disabled = False + self.tiff_folder_checkbox.disabled = False + else: + self.tiff_folder_checkbox.disabled = True + self.tiff_folder_checkbox.value = False + try: + imagesize = tif.pages[0].tags["ImageDescription"] + size = json.loads(imagesize.value)["shape"] + sizeX = size[2] + except Exception: + sizeZ = self.tiff_count_in_folder + sizeY = tif.pages[0].tags["ImageLength"].value + sizeX = tif.pages[0].tags["ImageWidth"].value + else: + sizeZ = size[0] + sizeY = size[1] + sizeX = size[2] + + elif image.suffix == ".npy": + size = np.load(image, mmap_mode="r").shape + sizeZ = size[0] + sizeY = size[1] + sizeX = size[2] + + elif image.suffix == ".hdf5" or image.suffix == ".h5": + self.projections.filepath = self.filedir / str(image) + try: + with h5py.File(self.projections.filepath) as f: + size = f[self.projections.hdf_key_norm_proj].shape + sizeZ = size[0] + sizeY = size[1] + sizeX = size[2] + except Exception as e: + sizeZ = 1 + sizeY = 1 + sizeX = 1 + + size_tuple = (sizeZ, sizeY, sizeX) + size_list.append(size_tuple) + + return size_list + + def check_for_images(self): + try: + self.images_in_dir = self.projections._file_finder_fullpath( + self.filedir, self.filetypes_to_look_for_images + ) + assert self.images_in_dir != [] + except AssertionError: + filetype_str = [x + " or " for x in self.filetypes_to_look_for_images[:-1]] + filetype_str = "".join( + filetype_str + [self.filetypes_to_look_for_images[-1]] + ) + self.find_metadata_status_label.value = ( + "No " + + filetype_str + + "files found in this directory. 
" + + self.files_not_found_str + ) + return False + else: + self.image_size_list = self.extract_image_sizes(self.images_in_dir) + self.images_in_dir_select.options = [x.name for x in self.images_in_dir] + self.images_in_dir_select.index = None + return True + + def enter_metadata_output(self): + """ + Enables/disables widgets if they are not/are already in the metadata. Displays + the box if any of the widgets are not disabled. + """ + + # Zip params/initial values/widgets and set it to default if not in metadata + self.required_metadata = zip( + self.required_parameters, self.init_required_values, self.widgets_to_enable + ) + # if required parameter not in current metadata instance, enable it and set + # metadata to default value. if it is, disable and set value to the metadata + # value + for name, val, widget in self.required_metadata: + if name not in self.projections.metadata.metadata: + widget.disabled = False + widget.value = val + self.projections.metadata.metadata[name] = val + else: + widget.disabled = True + widget.value = self.projections.metadata.metadata[name] + + # create metadata dataframe. 
The dataframe will only appear once all the + # required metadata is inside the metadata instance + self.create_and_display_metadata_tables() + + # pop up the widget box if any are disabled + if not all([x.disabled for x in self.widgets_to_enable]): + with self.metadata_input_output: + display(self.metadata_widget_box) + + def _update_filechooser_from_quicksearch(self, change): + self.metadata_input_output.clear_output() + super()._update_filechooser_from_quicksearch(change) + + def update_filechooser_from_quicksearch(self, json_files): + self.images_in_dir = None + try: + self.metadata_filepath = [ + self.filedir / file for file in json_files if "_metadata" in file + ] + assert self.metadata_filepath != [] + except AssertionError: # this means no metadata files in this directory + self.imported_metadata = False + self.metadata_already_displayed = False + if self.check_for_images(): + + # Initialize new metadata - old one might not have correct values + self.projections.metadata = Metadata_General_Prenorm() + self.projections.metadata.filedir = self.filedir + self.projections.metadatas = [self.projections.metadata] + self.enter_metadata_output() + self.find_metadata_status_label.value = ( + "No metadata associated with this file. " + + "Please enter metadata below before uploading " + + "so that tomopyui functions properly." + ) + else: + self.find_metadata_status_label.value = ( + "This directory has no metadata" + + " files and no images that you can upload." 
+ ) + self.import_button.disable() + else: + self.metadata_filepath = self.metadata_filepath[0] + self.projections.metadatas = Metadata.get_metadata_hierarchy( + self.metadata_filepath + ) + self.metadata_already_displayed = False + if self.projections.metadatas != []: + parent = {} + for i, metadata in enumerate(self.projections.metadatas): + metadata.filepath = copy.copy(self.metadata_filepath) + if i == 0: + metadata.load_metadata() + else: + metadata.metadata = parent + metadata.set_attributes_from_metadata(self.projections) + if "parent_metadata" in metadata.metadata: + parent = metadata.metadata["parent_metadata"].copy() + self.create_and_display_metadata_tables() + self.metadata_already_displayed = True + self.imported_metadata = True + self.import_button.enable() + if len(self.projections.metadatas) > 1: + if ( + self.projections.metadatas[-1].metadata["metadata_type"] + == "General_Normalized" + ): + self.projections.metadata = self.projections.metadatas[-1] + else: + self.projections.metadata = self.projections.metadatas[-2] + else: + self.projections.metadata = self.projections.metadatas[0] + if self.check_for_images(): + self.metadata_input_output.clear_output() + if self.filename is not None: + self.find_metadata_status_label.value = ( + "This directory has all the metadata you need. " + + " Proceed to upload your data (click blue button)." + ) + else: + self.find_metadata_status_label.value = ( + "This directory has all the metadata you need." + + " If your images are a lot of separate" + + " images, then upload the directory now. Otherwise," + + " select a single image to upload using the file browser." + ) + else: + self.find_metadata_status_label.value = ( + "This directory has metadata" + + " but no prenormalized data you can upload." + ) + + def import_data(self): + """ + Function that calls on io.py (projections) to run import. 
The function chosen + will depend on whether one is uploading a folder, or a + """ + + with self.metadata_table_output: + self.metadata_table_output.clear_output(wait=True) + if self.imported_metadata: + self.metadata_input_output.clear_output() + self.create_and_display_metadata_tables() + else: + self.metadata_input_output.clear_output() + + display(self.import_status_label) + if self.filename == "" or self.filename is None: + self.projections.import_filedir_projections(self) + else: + self.projections.import_file_projections(self) + self.import_status_label.value = ( + "Plotting data (downsampled for viewer to 0.5x)." + ) + self.viewer.plot(self.projections, ds=True, no_check=True) + self.Import.use_raw_button.reset_state() + self.Import.use_prenorm_button.reset_state() + if "import_time" in self.projections.metadata.metadata: + self.import_status_label.value = ( + "Import, downsampling (if any), and" + + " plotting complete in " + + f"~{self.projections.metadata.metadata['import_time']:.0f}s." 
+ ) + self.Import.use_prenorm_button.run_callback() + + def create_app(self): + self.app = HBox( + [ + VBox( + [ + self.quick_path_label, + HBox( + [ + self.quick_path_search, + VBox( + [ + self.images_in_dir_select, + self.tiff_folder_checkbox, + self.save_tiff_on_import_checkbox, + ] + ), + self.import_button.button, + ] + ), + self.filechooser, + self.metadata_input_output, + ], + ), + self.viewer.app, + ], + layout=Layout(justify_content="center"), + ) + + +class TwoEnergyUploader(PrenormUploader): + """""" + + def __init__(self, viewer): + UploaderBase.__init__(self) + self.projections = Projections_Prenormalized() + self.filechooser.title = "Import prenormalized data:" + self.viewer = viewer + self.viewer.create_app() + self.imported_metadata = False + self.viewer.rectangle_selector_on = False + self.energy_textbox = FloatText( + description="Energy: ", + disabled=True, + style=extend_description_style, + ) + self.pixel_size_textbox = FloatText( + description="Pixel Size: ", + disabled=True, + style=extend_description_style, + ) + self.widgets_to_enable = [self.energy_textbox, self.pixel_size_textbox] + + def import_data(self): + tic = time.perf_counter() + with self.metadata_table_output: + self.metadata_table_output.clear_output(wait=True) + if self.imported_metadata: + display(self.projections.metadata.dataframe) + display(self.import_status_label) + if self.filename == "" or self.filename is None: + self.import_status_label.value = "Importing file directory." + self.projections.import_filedir_projections(self) + else: + self.import_status_label.value = "Importing single file." + self.projections.import_file_projections(self) + self.import_status_label.value = "Checking for downsampled data." + self.projections._check_downsampled_data(label=self.import_status_label) + self.import_status_label.value = ( + "Plotting data (downsampled for viewer to 0.25x)." 
+ ) + if not self.imported_metadata: + self.projections.energy = self.energy_textbox.value + self.projections.current_pixel_size = self.pixel_size_textbox.value + self.viewer.plot(self.projections) + toc = time.perf_counter() + + +class ShiftsUploader(UploaderBase): + """""" + + def __init__(self, Prep): + super().__init__() + self.Prep = Prep + self.import_button.callback = Prep.add_shift # callback to add shifts to list + self.projections = Prep.projections + self.filechooser.title = "Import shifts: " + self.imported_metadata = False + self.filetypes_to_look_for = ["sx.npy", "sy.npy", "alignment_metadata.json"] + self.files_not_found_str = "" + + def update_filechooser_from_quicksearch(self, shifts_files): + self.import_button.disable() + self.shifts_from_json = False + self.shifts_from_npy = False + if "sx.npy" in shifts_files: # TODO: eventually deprecate + self.shifts_from_json = False + self.shifts_from_npy = True + elif "alignment_metadata.json" in shifts_files: + self.shifts_from_json = True + self.shifts_from_npy = False + if self.shifts_from_json: + self.align_metadata_filepath = self.filedir / "alignment_metadata.json" + self.imported_metadata = False + self.import_shifts_from_metadata() + self.update_shift_lists() + self.imported_metadata = True + else: + self.imported_metadata = False + self.import_shifts_from_npy() + self.update_shift_lists() + + def import_shifts_from_npy(self): + self.sx = np.load(self.filedir / "sx.npy") + self.sy = np.load(self.filedir / "sy.npy") + self.conv = np.load(self.filedir / "conv.npy") + self.align_metadata = Metadata_Align() + self.align_metadata.filedir = self.filedir + self.align_metadata.filename = "alignment_metadata.json" + self.align_metadata.filepath = ( + self.align_metadata.filedir / "alignment_metadata.json" + ) + self.align_metadata.load_metadata() + + def import_shifts_from_metadata(self): + self.align_metadata = Metadata_Align() + self.align_metadata.filedir = self.filedir + self.align_metadata.filename = 
"alignment_metadata.json" + self.align_metadata.filepath = ( + self.align_metadata.filedir / "alignment_metadata.json" + ) + self.align_metadata.load_metadata() + self.sx = self.align_metadata.metadata["sx"] + self.sy = self.align_metadata.metadata["sy"] + self.conv = self.align_metadata.metadata["convergence"] + + def update_shift_lists(self): + self.Prep.shifts_sx_select.options = self.sx + self.Prep.shifts_sy_select.options = self.sy + self.import_button.enable() + + def import_data(self, change): + pass + + +class RawUploader_SSRL62B(UploaderBase): + """ + This uploader has two slots for choosing files and uploading: + one for references, one for projections. This + is because our data is stored in two separate folders. + """ + + def __init__(self, Import): + super().__init__() + self._init_widgets() + + self.projections = RawProjectionsTiff_SSRL62B() + + self.filedir = pathlib.Path() + self.filename = pathlib.Path() + + # Save filedir/filename for projections + self.filedir_projections = pathlib.Path() + self.filename_projections = pathlib.Path() + + # Save filedir/filename for references + self.filedir_references = pathlib.Path() + self.filename_references = pathlib.Path() + + self.user_overwrite_energy = True + + self.Import = Import + + self.filetypes_to_look_for = ["metadata.txt"] + self.files_not_found_str = "Choose a directory with a metadata.txt file." 
+ + self.projections_found = False + self.references_found = False + + # Creates the app that goes into the Import object + self.create_app() + + def _init_widgets(self): + + # File browser for projections + self.filechooser_projections = FileChooser() + self.filechooser_projections.register_callback( + self._update_quicksearch_from_filechooser_projections + ) + self.filechooser_label_projections = Label( + "Raw Projections", style=self.header_font_style + ) + self.filechooser_projections.show_only_dirs = True + self.filechooser_projections.title = "Choose raw projections file directory:" + + # Quick path search textbox + self.quick_path_search_projections = Textarea( + placeholder=r"Z:\swelborn", + style=extend_description_style, + disabled=False, + layout=Layout(align_items="stretch"), + ) + self.quick_path_search_projections.observe( + self._update_filechooser_from_quicksearch_projections, names="value" + ) + self.quick_path_label_projections = Label("Quick path search (projections):") + + # File browser for refs + self.filechooser_references = FileChooser() + self.filechooser_references.register_callback( + self._update_quicksearch_from_filechooser_references + ) + self.filechooser_label_references = Label( + "Raw References", style=self.header_font_style + ) + self.filechooser_references.show_only_dirs = True + self.filechooser_references.title = "Choose raw reference file directory:" + + # Quick path search textbox + self.quick_path_search_references = Textarea( + placeholder=r"Z:\swelborn", + style=extend_description_style, + disabled=False, + layout=Layout(align_items="stretch"), + ) + self.quick_path_search_references.observe( + self._update_filechooser_from_quicksearch_references, names="value" + ) + self.quick_path_label_references = Label("Quick path search (references):") + + self.upload_progress = IntProgress( + description="Uploading: ", + value=0, + min=0, + max=100, + layout=Layout(justify_content="center"), + ) + + # -- Setting metadata widgets 
-------------------------------------------------- + + self.px_size_textbox = FloatText( + value=30, + description="Pixel size (binning 1): ", + disabled=False, + style=extend_description_style, + ) + self.px_units_dropdown_opts = ["nm", "\u00b5m", "mm", "cm"] + self.px_units_dropdown = Dropdown( + value="\u00b5m", + options=self.px_units_dropdown_opts, + disabled=False, + style=extend_description_style, + ) + self.energy_textbox = FloatText( + value=8000, + description="Energy: ", + disabled=False, + style=extend_description_style, + ) + self.energy_units_dropdown = Dropdown( + value="eV", + options=["eV", "keV"], + disabled=False, + ) + + def _update_quicksearch_from_filechooser_projections(self, *args): + self.filedir = pathlib.Path(self.filechooser_projections.selected_path) + self.filename = self.filechooser_projections.selected_filename + self.quick_path_search_projections.value = str(self.filedir / self.filename) + + def _update_quicksearch_from_filechooser_references(self, *args): + self.filedir = pathlib.Path(self.filechooser_references.selected_path) + self.filename = self.filechooser_references.selected_filename + self.quick_path_search_references.value = str(self.filedir / self.filename) + + def _update_filechooser_from_quicksearch_projections(self, change): + self.projections_found = False + self.looking_in_projections_filedir = True + self.looking_in_references_filedir = False + self.import_button.disable() + self._update_filechooser_from_quicksearch(change) + + def _update_filechooser_from_quicksearch_references(self, change): + self.references_found = False + self.looking_in_projections_filedir = False + self.looking_in_references_filedir = True + self.import_button.disable() + self._update_filechooser_from_quicksearch(change) + + def enable_import(self): + if self.references_found and self.projections_found: + self.import_button.enable() + self.projections.import_metadata() + self.projections.metadata.create_metadata_hbox() + with 
self.metadata_table_output: + self.metadata_table_output.clear_output(wait=True) + display(self.projections.metadata.metadata_hbox) + + def import_data(self, *args): + + tic = time.perf_counter() + self.projections.import_data(self) + toc = time.perf_counter() + self.projections.metadatas = Metadata.get_metadata_hierarchy( + self.projections.metadata.filedir / self.projections.metadata.filename + ) + self.import_status_label.value = f"Import and normalization took {toc-tic:.0f}s" + self.projections.filedir = self.projections.import_savedir + self.viewer.plot(self.projections) + + def update_filechooser_from_quicksearch(self, textfiles): + try: + metadata_filepath = ( + self.filedir / [file for file in textfiles if "metadata.txt" in file][0] + ) + except Exception: + not_found_str = ( + "This directory doesn't have a metadata.txt file," + + " please try another one." + ) + self.find_metadata_status_label.value = not_found_str + return + try: + assert metadata_filepath != [] + except Exception: + not_found_str = ( + "This directory doesn't have a metadata.txt file," + + " please try another one." 
+ ) + self.find_metadata_status_label.value = not_found_str + return + else: + if self.looking_in_projections_filedir: + self.projections_found = True + self.projections_metadata_filepath = metadata_filepath + self.projections.import_metadata_projections(self) + self.filedir_projections = copy.copy(self.filedir) + + if self.looking_in_references_filedir: + self.references_found = True + self.references_metadata_filepath = metadata_filepath + self.projections.import_metadata_references(self) + self.filedir_references = copy.copy(self.filedir) + + self.enable_import() + # self.projections.import_metadata(self) + # self.metadata_table = self.projections.metadata.metadata_to_DataFrame() + # with self.metadata_table_output: + # self.metadata_table_output.clear_output(wait=True) + # display(self.projections.metadata.dataframe) + + def create_app(self): + + self.app = HBox( + [ + VBox( + [ + self.filechooser_label_projections, + self.quick_path_label_projections, + HBox( + [ + self.quick_path_search_projections, + self.import_button.button, + ] + ), + self.filechooser_projections, + self.filechooser_label_references, + self.quick_path_label_references, + self.quick_path_search_references, + self.filechooser_references, + HBox( + [ + self.px_size_textbox, + self.px_units_dropdown, + ] + ), + HBox( + [ + self.energy_textbox, + self.energy_units_dropdown, + ] + ), + # self.save_tiff_on_import_checkbox, + ], + ), + self.viewer.app, + ], + layout=Layout(justify_content="center"), + ) + + +class RawUploader_SSRL62C(UploaderBase): + """""" + + def __init__(self, Import): + super().__init__() + self._init_widgets() + self.user_overwrite_energy = False + self.projections = RawProjectionsXRM_SSRL62C() + self.Import = Import + self.filechooser.title = "Choose a Raw XRM File Directory" + self.filetypes_to_look_for = [".txt"] + self.files_not_found_str = "Choose a directory with a ScanInfo file." 
+ + # Creates the app that goes into the Import object + self.create_app() + + def _init_widgets(self): + self.upload_progress = IntProgress( + description="Uploading: ", + value=0, + min=0, + max=100, + layout=Layout(justify_content="center"), + ) + self.energy_select_multiple = SelectMultiple( + options=["7700.00", "7800.00", "7900.00"], + rows=3, + description="Energies (eV): ", + disabled=True, + style=extend_description_style, + ) + self.energy_select_label = "Select energies" + self.energy_select_label = Label( + self.energy_select_label, style=self.header_font_style + ) + self.energy_overwrite_textbox = FloatText( + description="Overwrite Energy (eV): ", + style=extend_description_style, + disabled=True, + ) + self.energy_overwrite_textbox.observe(self.energy_overwrite, names="value") + + self.already_uploaded_energies_select = Select( + options=["7700.00", "7800.00", "7900.00"], + rows=3, + description="Uploaded Energies (eV): ", + disabled=True, + style=extend_description_style, + ) + self.already_uploaded_energies_label = "Previously uploaded energies" + self.already_uploaded_energies_label = Label( + self.already_uploaded_energies_label, style=self.header_font_style + ) + + def energy_overwrite(self, *args): + if ( + self.energy_overwrite_textbox.value + != self.projections.energies_list_float[0] + and self.energy_overwrite_textbox.value is not None + ): + self.user_input_energy_float = self.energy_overwrite_textbox.value + self.user_input_energy_str = str(f"{self.user_input_energy_float:08.2f}") + self.energy_select_multiple.options = [ + self.user_input_energy_str, + ] + self.projections.pixel_sizes = [ + self.projections.calculate_px_size( + self.user_input_energy_float, self.projections.binning + ) + ] + self.user_overwrite_energy = True + + def import_data(self): + + tic = time.perf_counter() + self.projections.import_filedir_all(self) + toc = time.perf_counter() + self.projections.metadatas = Metadata.get_metadata_hierarchy( + 
self.projections.metadata.filedir / self.projections.metadata.filename + ) + self.import_status_label.value = ( + f"Import and normalization took {toc-tic:.0f}s" + ) + self.projections.filedir = self.projections.import_savedir + self.viewer.plot(self.projections) + + def update_filechooser_from_quicksearch(self, textfiles): + try: + scan_info_filepath = ( + self.filedir / [file for file in textfiles if "ScanInfo" in file][0] + ) + except Exception: + not_found_str = ( + "This directory doesn't have a ScanInfo file," + + " please try another one." + ) + self.find_metadata_status_label.value = not_found_str + return + try: + assert scan_info_filepath != [] + except Exception: + not_found_str = ( + "This directory doesn't have a ScanInfo file," + + " please try another one." + ) + self.find_metadata_status_label.value = not_found_str + return + else: + self.user_overwrite_energy = False + self.projections.import_metadata(self) + self.metadata_table = self.projections.metadata.metadata_to_DataFrame() + with self.metadata_table_output: + self.metadata_table_output.clear_output(wait=True) + display(self.projections.metadata.dataframe) + self.import_button.enable() + if self.projections.energy_guessed: + self.energy_overwrite_textbox.disabled = False + self.energy_overwrite_textbox.value = ( + self.projections.energies_list_float[0] + ) + else: + self.energy_overwrite_textbox.disabled = True + self.energy_overwrite_textbox.value = 0 + self.check_energy_folders() + + def check_energy_folders(self): + self.already_uploaded_energies_select.disabled = True + folders = [pathlib.Path(f) for f in os.scandir(self.filedir) if f.is_dir()] + reg_exp = re.compile(r"\d\d\d\d\d\.\d\deV") + ener_folders = map(reg_exp.findall, [str(folder) for folder in folders]) + self.already_uploaded_energies = [ + str(folder[0][:-2]) for folder in ener_folders if (len(folder) > 0) + ] + self.already_uploaded_energies_select.options = self.already_uploaded_energies + 
self.already_uploaded_energies_select.disabled = False + + def create_app(self): + self.app = HBox( + [ + VBox( + [ + self.file_chooser_label, + self.quick_path_label, + HBox( + [ + self.quick_path_search, + self.import_button.button, + ] + ), + self.filechooser, + self.energy_select_label, + self.energy_select_multiple, + self.energy_overwrite_textbox, + self.save_tiff_on_import_checkbox, + VBox( + [ + self.already_uploaded_energies_label, + self.already_uploaded_energies_select, + ], + layout=Layout(align_content="center"), + ), + ], + ), + self.viewer.app, + ], + layout=Layout(justify_content="center"), + ) + + +class RawUploader_ALS832(UploaderBase): + """ + Raw uploaders are the way you get your raw data (projections, flats, dark fields) + into TomoPyUI. It holds a ProjectionsBase subclass (see io.py) that will do all of + the data import stuff. the ProjectionsBase subclass for SSRL is + RawProjectionsXRM_SSRL62. For you, it could be named + RawProjectionsHDF5_APSyourbeamlinenumber(). + + """ + + def __init__(self, Import): + super().__init__() # look at UploaderBase __init__() + self._init_widgets() + self.projections = RawProjectionsHDF5_ALS832() + self.reset_metadata_to = Metadata_ALS_832_Raw + self.Import = Import + self.filechooser.title = "Import Raw hdf5 File" + self.filetypes_to_look_for = [".h5"] + self.files_not_found_str = "Choose a directory with an hdf5 file." + + # Creates the app that goes into the Import object + self.create_app() + + def _init_widgets(self): + """ + You can make your widgets more fancy with this function. See the example in + RawUploader_SSRL62C. + """ + pass + + def import_data(self): + """ + This is what is called when you click the blue import button on the frontend. 
+ """ + with self.progress_output: + self.progress_output.clear_output() + display(self.import_status_label) + tic = time.perf_counter() + self.projections.import_file_all(self) + toc = time.perf_counter() + self.projections.metadatas = Metadata.get_metadata_hierarchy( + self.projections.metadata.filedir / self.projections.metadata.filename + ) + self.import_status_label.value = f"Import and normalization took {toc-tic:.0f}s" + self.viewer.plot(self.projections) + + def update_filechooser_from_quicksearch(self, h5files): + """ + This is what is called when you update the quick path search bar. Right now, + this is very basic. If you want to see a more complex version of this you can + look at the example in PrenormUploader. + + This is called after _update_filechooser_from_quicksearch in UploaderBase. + """ + if len(h5files) == 1: + self.filename = h5files[0] + elif len(h5files) > 1 and self.filename is None: + self.find_metadata_status_label.value = ( + "Multiple h5 files found in this" + + " directory. Choose one with the file browser." + ) + self.import_button.disable() + return + self.projections.metadata = self.reset_metadata_to() + self.projections.import_metadata(self.filedir / self.filename) + self.projections.metadata.metadata_to_DataFrame() + with self.metadata_table_output: + self.metadata_table_output.clear_output(wait=True) + display(self.projections.metadata.dataframe) + self.import_button.enable() + + def create_app(self): + self.app = HBox( + [ + VBox( + [ + self.quick_path_label, + HBox( + [ + self.quick_path_search, + self.import_button.button, + ] + ), + self.filechooser, + ], + ), + self.viewer.app, + ], + layout=Layout(justify_content="center"), + ) + +class RawUploader_APS(UploaderBase): + """ + Raw uploaders are the way you get your raw data (projections, flats, dark fields) + into TomoPyUI. It holds a ProjectionsBase subclass (see io.py) that will do all of + the data import stuff. 
the ProjectionsBase subclass for SSRL is + RawProjectionsXRM_SSRL62. For you, it could be named + RawProjectionsHDF5_APSyourbeamlinenumber(). + + """ + + def __init__(self, Import): + super().__init__() # look at UploaderBase __init__() + self._init_widgets() + self.projections = RawProjectionsHDF5_APS() + self.reset_metadata_to = Metadata_APS_Raw + self.Import = Import + self.filechooser.title = "Import Raw hdf5 File" + self.filetypes_to_look_for = [".h5"] + self.files_not_found_str = "Choose a directory with an hdf5 file." + + # Creates the app that goes into the Import object + self.create_app() + + def _init_widgets(self): + """ + You can make your widgets more fancy with this function. See the example in + RawUploader_SSRL62C. + """ + pass + + def import_data(self): + """ + This is what is called when you click the blue import button on the frontend. + """ + with self.progress_output: + self.progress_output.clear_output() + display(self.import_status_label) + tic = time.perf_counter() + self.projections.import_file_all(self) + toc = time.perf_counter() + self.import_status_label.value = f"Import and normalization took {toc-tic:.0f}s" + self.viewer.plot(self.projections) + + def update_filechooser_from_quicksearch(self, h5files): + """ + This is what is called when you update the quick path search bar. Right now, + this is very basic. If you want to see a more complex version of this you can + look at the example in PrenormUploader. + + This is called after _update_filechooser_from_quicksearch in UploaderBase. + """ + if len(h5files) == 1: + self.filename = h5files[0] + elif len(h5files) > 1 and self.filename is None: + self.find_metadata_status_label.value = ( + "Multiple h5 files found in this" + + " directory. Choose one with the file browser." 
+ ) + self.import_button.disable() + return + self.projections.metadata = self.reset_metadata_to() + self.projections.import_metadata(self.filedir / self.filename) + self.projections.metadata.metadata_to_DataFrame() + with self.metadata_table_output: + self.metadata_table_output.clear_output(wait=True) + display(self.projections.metadata.dataframe) + self.import_button.enable() + + def create_app(self): + self.app = HBox( + [ + VBox( + [ + self.quick_path_label, + HBox( + [ + self.quick_path_search, + self.import_button.button, + ] + ), + self.filechooser, + ], + ), + self.viewer.app, + ], + layout=Layout(justify_content="center"), + ) + + +class RawUploader_APS(RawUploader_APS): + """ + See descriptions in RawUploader_ALS832 superclass. You shouldn't have to do much + here other than changing self.projections and self.reset_metadata_to if you change + those names. + # Francesco: edit here, if needed. + """ + + def __init__(self, Import): + super().__init__(Import) + self.projections = RawProjectionsHDF5_APS() + self.reset_metadata_to = Metadata_APS_Raw diff --git a/tomopyui/widgets/main.py b/tomopyui/widgets/main.py index 0ec2397..882badc 100644 --- a/tomopyui/widgets/main.py +++ b/tomopyui/widgets/main.py @@ -1,22 +1,29 @@ -from ipywidgets import * -from tomopyui.widgets._shared.helpers import import_module_set_env -import tomopyui.widgets.meta as meta import multiprocessing +from ipywidgets import * +from tomopyui.widgets.helpers import import_module_set_env +from tomopyui.widgets.imports import ( + Import_SSRL62C, + Import_SSRL62B, + Import_ALS832, + Import_APS, +) +from tomopyui.widgets.center import Center +from tomopyui.widgets.analysis import Align, Recon +from tomopyui.widgets.dataexplorer import DataExplorerTab +from tomopyui.widgets.prep import Prep # checks if cupy is installed. if not, disable cuda and certain gui aspects -# TODO: can put this somewhere else (in meta?) 
+# TODO: can put this somewhere else cuda_import_dict = {"cupy": "cuda_enabled"} import_module_set_env(cuda_import_dict) # checks how many cpus available for compute on CPU # TODO: can later add a bounded textbox for amount of CPUs user wants to use # for reconstruction. right now defaults to all cores being used. - - os.environ["num_cpu_cores"] = str(multiprocessing.cpu_count()) -def create_dashboard(): +def create_dashboard(institution: str): """ This is the function to open the app in a jupyter notebook. In jupyter, run the following commands: @@ -26,18 +33,34 @@ def create_dashboard(): %matplotlib ipympl import tomopyui.widgets.main as main - dashboard, file_import, center, prep, align, recon = main.create_dashboard() + ( + dashboard_output, + dashboard, + file_import, + prep, + center, + align, + recon, + dataexplorer, + ) = main.create_dashboard( + "ALS_832" + ) # can be "SSRL_62C", "ALS_832", "APS" dashboard """ - - file_import = meta.Import() - center = meta.Center(file_import) - prep = meta.Prep(file_import) - align = meta.Align(file_import, center) - recon = meta.Recon(file_import, center) - dataexplorer = meta.DataExplorerTab(align, recon) - dataexplorer.create_data_explorer_tab() + if institution == "ALS_832": + file_import = Import_ALS832() + if institution == "SSRL_62C": + file_import = Import_SSRL62C() + if institution == "SSRL_62B": + file_import = Import_SSRL62B() + if institution == "APS": + file_import = Import_APS() + prep = Prep(file_import) + center = Center(file_import) + align = Align(file_import, center) + recon = Recon(file_import, center) + dataexplorer = DataExplorerTab(align, recon) for checkbox in ( align.astra_cuda_methods_checkboxes + recon.astra_cuda_methods_checkboxes @@ -54,7 +77,8 @@ def create_dashboard(): dashboard_tabs = [ file_import.tab, - center.center_tab, + prep.tab, + center.tab, align.tab, recon.tab, dataexplorer.tab, @@ -63,6 +87,7 @@ def create_dashboard(): dashboard_titles = [ "Import", + "Prep", "Center", "Align", 
"Reconstruct", @@ -73,11 +98,42 @@ def create_dashboard(): dashboard = Tab(titles=dashboard_titles) dashboard.children = dashboard_tabs + # workaround for nested bqplot issue + def update_dashboard(change): + dashboard.children = dashboard_tabs + with dashboard_output: + dashboard_output.clear_output(wait=True) + display(dashboard) + + accordions = [ + file_import.raw_accordion, + file_import.prenorm_accordion, + prep.viewer_accordion, + center.manual_center_accordion, + align.viewer_accordion, + recon.viewer_accordion, + dataexplorer.analysis_browser_accordion, + # dataexplorer.recent_alignment_accordion, + # dataexplorer.recent_recon_accordion, + ] + + [ + accordion.observe(update_dashboard, names="selected_index") + for accordion in accordions + ] + + dashboard.observe(update_dashboard, names="selected_index") + dashboard_output = Output() + with dashboard_output: + display(dashboard) + return ( + dashboard_output, dashboard, file_import, - center, prep, + center, align, recon, + dataexplorer, ) diff --git a/tomopyui/widgets/meta.py b/tomopyui/widgets/meta.py deleted file mode 100644 index 145eed8..0000000 --- a/tomopyui/widgets/meta.py +++ /dev/null @@ -1,2325 +0,0 @@ -#!/usr/bin/env python - -from ipywidgets import * -from ._import import import_helpers -from ._shared import helpers -from ._shared._init_widgets import init_widgets, _set_widgets_from_load_metadata -from ipyfilechooser import FileChooser -from mpl_interactions import ( - hyperslicer, - ioff, - interactive_hist, - zoom_factory, - panhandler, -) -from bqplot_image_gl import ImageGL -from tomopyui.backend.util.center import write_center -from tomopy.recon.rotation import find_center_vo, find_center, find_center_pc -from tomopyui.backend.util.metadata_io import ( - save_metadata, - load_metadata, - metadata_to_DataFrame, -) - -# includes astra_cuda_recon_algorithm_kwargs, tomopy_recon_algorithm_kwargs, -# and tomopy_filter_names, extend_description_style -from tomopyui._sharedvars import * -from 
tomopyui.backend.tomodata import TomoData - -import functools -import os -import tomopy.prep.normalize -import tomopyui.backend.tomodata as td -import matplotlib.pyplot as plt -import numpy as np -import logging -import bqplot as bq -import pathlib - - -class Import: - """ - Class to import tomography data. At this point, it is assumed that the data - coming in is already normalized externally. - - Attributes - ---------- - angle_start, angle_end : double - Start and end angles of the data being imported - num_theta : int - Number of theta values in the dataset. Currently does not do anything - in the backend. - prj_shape : tuple, (Z, Y, X) - Shape pulled from image imported. After choosing a file in the - Filechooser object, this will update. This is sent to Align and Recon - objects to instantiate sliders. - angles_textboxes : list of :doc:`Textbox ` - List of :doc:`Textbox ` widgets for angles. - import_opts_list : list of str - Right now, the only option on import is "rotate". If other options were - to be added, you can add to this list. Each of these options will - create a :doc:`Checkbox ` in self.opts_checkboxes. - opts_checkboxes : list of :doc:`Checkbox ` - Created from import_opts_list. - fpath : str - Updated when file or filepath is chosen. - fname : str - Updated when file is chosen (if only directory is chosen, fname="") - ftype : "npy", "tiff" - Filetypes. Should be expanded to h5 files in future versions. - filechooser : FileChooser() - Filechooser widget - prj_range_x : (0, number of x pixels) - Retrieved from reading image metadata after choosing a file or folder. - prj_range_y : (0, number of y pixels) - Retrieved from reading image metadata after choosing a file or folder. - prj_range_z : (0, number of z pixels) - Not currently working - tomo : tomodata.TomoData() - Created after calling make_tomo. - wd : str - Current working directory, set after choosing file or folder. 
This is - defaulted to fpath - log : logging.logger - Logger used throughout the program - log_handler : - Can use this as context manager, see - :doc:`ipywidgets docs `. - metadata : dict - Keys are attributes of the class. Values are their values. They are - updated by `Import.set_metadata()`. - tab : :doc:`HBox ` - Box for all the :doc:`widgets ` that are in the - `Import` tab. Created at end of - __init__(). - - """ - - def __init__(self): - - # Init textboxes - self.angle_start = -90.0 - self.angle_end = 90.0 - self.num_theta = 360 - self.prj_shape = None - self.prj_range_x = (0, 100) - self.prj_range_y = (0, 100) - self.prj_range_z = (0, 100) - self.tomo = None - self.angles_textboxes = import_helpers.create_angles_textboxes(self) - - # Init checkboxes - self.import_opts_list = ["rotate"] - self.import_opts = {key: False for key in self.import_opts_list} - self.opts_checkboxes = helpers.create_checkboxes_from_opt_list( - self.import_opts_list, self.import_opts, self - ) - - # Init filechooser - self.fpath = None - self.fname = None - self.ftype = None - self.filechooser = FileChooser() - self.filechooser.register_callback(self.update_file_information) - self.filechooser.title = "Import Normalized Tomogram:" - self.wd = None - - # Init filechooser for align metadata - self.fpath_align = None - self.fname_align = None - self.filechooser_align = FileChooser() - self.filechooser_align.register_callback(self.update_file_information_align) - self.filechooser_align.title = "Import Alignment Metadata:" - - # Init filechooser for recon metadata - self.fpath_recon = None - self.fname_recon = None - self.filechooser_recon = FileChooser() - self.filechooser_recon.register_callback(self.update_file_information_recon) - self.filechooser_recon.title = "Import Reconstruction Metadata:" - - # Init logger to be used throughout the app. - # TODO: This does not need to be under Import. 
- self.log = logging.getLogger(__name__) - self.log_handler, self.log = helpers.return_handler(self.log, logging_level=20) - - # Init metadata - self.metadata = {} - self.set_metadata() - - # Create tab - self.make_tab() - - def set_wd(self, wd): - """ - Sets the current working directory of `Import` class and changes the - current directory to it. - """ - self.wd = wd - os.chdir(wd) - - def set_metadata(self): - """ - Sets relevant metadata for `Import` - """ - self.metadata = { - "fpath": self.fpath, - "fname": self.fname, - "angle_start": self.angle_start, - "angle_end": self.angle_end, - "num_theta": self.num_theta, - "prj_range_x": self.prj_range_x, - "prj_range_y": self.prj_range_y, - } | self.import_opts - - def update_file_information(self): - """ - Callback for `Import`.filechooser. - """ - self.fpath = self.filechooser.selected_path - self.fname = self.filechooser.selected_filename - self.set_wd(self.fpath) - # metadata must be set here in case tomodata is created (for folder - # import). this can be changed later. - self.set_metadata() - self.get_prj_shape() - self.set_prj_ranges() - self.set_metadata() - - def update_file_information_align(self): - """ - Callback for filechooser_align. - """ - self.fpath_align = self.filechooser_align.selected_path - self.fname_align = self.filechooser_align.selected_filename - self.set_metadata() - - def update_file_information_recon(self): - """ - Callback for filechooser_recon. - """ - self.fpath_recon = self.filechooser_recon.selected_path - self.fname_recon = self.filechooser_recon.selected_filename - self.set_metadata() - - def get_prj_shape(self): - """ - Grabs the image shape depending on the filename. Does this without - loading the image into memory. 
- """ - if self.fname.__contains__(".tif"): - self.prj_shape = helpers.get_img_shape( - self.fpath, - self.fname, - "tiff", - self.metadata, - ) - elif self.fname.__contains__(".npy"): - self.prj_shape = helpers.get_img_shape( - self.fpath, - self.fname, - "npy", - self.metadata, - ) - elif self.fname == "": - self.prj_shape = helpers.get_img_shape( - self.fpath, self.fname, "tiff", self.metadata, folder_import=True - ) - - def set_prj_ranges(self): - self.prj_range_x = (0, self.prj_shape[2] - 1) - self.prj_range_y = (0, self.prj_shape[1] - 1) - self.prj_range_z = (0, self.prj_shape[0] - 1) - - def make_tab(self): - """ - Creates the HBox which stores widgets. - """ - import_widgets = [ - [self.filechooser], - self.angles_textboxes, - self.opts_checkboxes, - ] - import_widgets = [item for sublist in import_widgets for item in sublist] - self.tab = VBox( - [ - HBox(import_widgets), - HBox([self.filechooser_align, self.filechooser_recon]), - ] - ) - - def make_tomo(self): - """ - Creates a `~tomopyui.backend.tomodata.TomoData` object and stores it in - `Import`. - - .. code-block:: python - - # In Jupyter: - - # Cell 1: - from ipywidgets import * - from tomopyui.widgets.meta import Import - from - a = Import() - a.tab - # You should see the HBox widget, you can select your file. - - # Cell2: - a.make_tomo() # creates tomo.TomoData based on inputs - a.tomo.prj_imgs # access the projections like so. - - """ - self.tomo = td.TomoData(metadata=self.metadata) - - -class Plotter: - """ - Class for plotting. Creates - :doc:`hyperslicer ` for `Center`, - `Align`, and `Recon` classes. - - Attributes - ---------- - Import : `Import` - Needs an import object to be constructed. - DataExplorer : `DataExplorer` - Optionally imports a `DataExplorer` object - prj_range_x_slider : :doc:`IntRangeSlider ` - Used in both Align and Recon as their x range slider. - prj_range_y_slider : :doc:`IntRangeSlider ` - Used in both Align and Recon as their y range slider. 
- set_range_button : :doc:`Button ` - Used in both Align and Recon for setting the range to the - current plot range on the image shown in the - :doc:`hyperslicer ` (left fig). - slicer_with_hist_fig : :doc:`matplotlib subplots ` - Figure containing an :doc:`hyperslicer ` (left) and - a :doc:`histogram ` (right) associated with that slice. - threshold_control : :class:`mpl-interactions:mpl_interactions.controller.Controls` - Comes from :doc:`hyperslicer `. Contains slider associated - with the current slice. See mpl-interactions for details. - threshold_control_list : list of :doc:`ipywidgets ` - Allows for organization of the widgets after creating the :doc:`hyperslicer `. - save_animation_button : :doc:`Button ` - Not implemented in `Align` or `Recon` yet (TODO). Enables saving of mp4 - of the :doc:`hyperslicer ` with its current :doc:`histogram ` threshold range. - """ - - def __init__(self, Import=None, DataExplorer=None): - - self.DataExplorer = DataExplorer - self.Import = Import - self._init_widgets() - - def _init_widgets(self): - self.prj_range_x_slider = IntRangeSlider( - value=[0, 10], - min=0, - max=10, - step=1, - description="Projection X Range:", - disabled=True, - continuous_update=False, - orientation="horizontal", - readout=True, - readout_format="d", - style=extend_description_style, - ) - self.prj_range_y_slider = IntRangeSlider( - value=[0, 10], - min=0, - max=10, - step=1, - description="Projection Y Range:", - disabled=True, - continuous_update=False, - orientation="horizontal", - readout=True, - readout_format="d", - style=extend_description_style, - ) - self.set_range_button = Button( - description="Click to set current range to plot range.", - layout=Layout(width="auto"), - ) - self.slicer_with_hist_fig = None - self.threshold_control = None - self.threshold_control_list = None - self.save_animation_button = None - - def create_slicer_with_hist(self, plot_type="prj", imagestack=None, Center=None): - """ - Creates a plot with a 
:doc:`histogram ` - for a given set of data. Sets Plotter attributes: slicer_with_hist_fig, - threshold_control, threshold_control_list, and set_range_button. - - Parameters - ----------- - plot_type : "prj" or "center" - Choice will determine what the :doc:`hyperslicer ` will show (projection - images or reconstructed data with different centers, respectively). - imagestack : 3D `numpy.ndarray` - Images to show in :doc:`hyperslicer ` - Center : `Center` - Used to title the plot with its attribute index_to_try. - - """ - - # Turn off immediate display of plot. - with plt.ioff(): - fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5), layout="tight") - fig.suptitle("") - fig.canvas.header_visible = False - - # check what kind of plot it is - if plot_type == "prj": - self.Import.make_tomo() - imagestack = self.Import.tomo.prj_imgs - theta = self.Import.tomo.theta - slider_linsp = theta * 180 / np.pi - slider_str = "Image No:" - ax1.set_title("Projection Images") - if plot_type == "center": - imagestack = imagestack - slider_linsp = (0, imagestack.shape[0]) - slider_str = "Center Number:" - ax1.set_title(f"Reconstruction on slice {Center.index_to_try}") - - ax2.set_title("Image Intensity Histogram") - ax2.set_yscale("log") - - # updates histogram based on slider, z-axis of hyperstack - def histogram_data_update(**kwargs): - return imagestack[threshold_control.params[slider_str]] - - def histogram_lim_update(xlim, ylim): - current_ylim = [ylim[0], ylim[1]] - ax2.set_ylim(ylim[0], ylim[1]) - current_xlim = [xlim[0], xlim[1]] - ax2.set_xlim(xlim[0], xlim[1]) - - # creating slicer, thresholding is in vmin_vmax param. 
- # tuples here create a slider in that range - threshold_control = hyperslicer( - imagestack, - vmin_vmax=("r", imagestack.min(), imagestack.max()), - play_buttons=True, - play_button_pos="right", - ax=ax1, - axis0=slider_linsp, - names=(slider_str,), - ) - current_xlim = [100, -100] - current_ylim = [1, -100] - for i in range(imagestack.shape[0]): - image_histogram_temp = np.histogram(imagestack[i], bins=100) - if image_histogram_temp[1].min() < current_xlim[0]: - current_xlim[0] = image_histogram_temp[1].min() - if image_histogram_temp[1].max() > current_xlim[1]: - current_xlim[1] = image_histogram_temp[1].max() - if image_histogram_temp[0].max() > current_ylim[1]: - current_ylim[1] = image_histogram_temp[0].max() - - image_histogram = interactive_hist( - histogram_data_update, - xlim=("r", current_xlim[0], current_xlim[1]), - ylim=("r", 1, current_ylim[1]), - bins=100, - ax=ax2, - controls=threshold_control[slider_str], - use_ipywidgets=True, - ) - - # registering to change limits with sliders - image_histogram.register_callback( - histogram_lim_update, ["xlim", "ylim"], eager=True - ) - - # finding lowest pixel value on image to set the lowest scale to that - vmin = imagestack.min() - vmax = imagestack.max() - - # putting vertical lines - lower_limit_line = ax2.axvline(vmin, color="k") - upper_limit_line = ax2.axvline(vmax, color="k") - - # putting vertical lines - def hist_vbar_range_callback(vmin, vmax): - lower_limit_line.set_xdata(vmin) - upper_limit_line.set_xdata(vmax) - - threshold_control.register_callback( - hist_vbar_range_callback, ["vmin", "vmax"], eager=True - ) - - # allowing zoom/pan on scroll/right mouse drag - self.pan_handler = panhandler(fig) - disconnect_zoom = zoom_factory(ax1) - disconnect_zoom2 = zoom_factory(ax2) - - # sets limits according to current image limits - def set_img_lims_on_click(click): - xlim = [int(np.rint(x)) for x in ax1.get_xlim()] - ylim = [int(np.rint(x)) for x in ax1.get_ylim()] - if xlim[0] < 0: - xlim[0] = 0 - if 
xlim[1] > imagestack.shape[2]: - xlim[1] = imagestack.shape[2] - if ylim[1] < 0: - ylim[1] = 0 - if ylim[0] > imagestack.shape[1]: - ylim[0] = imagestack.shape[1] - - xlim = tuple(xlim) - ylim = tuple(ylim) - self.prj_range_x_slider.value = xlim - self.prj_range_y_slider.value = ylim[::-1] - self.set_range_button.button_style = "success" - self.set_range_button.icon = "square-check" - - self.set_range_button.on_click(set_img_lims_on_click) - - # saving some things in the object - self.slicer_with_hist_fig = fig - self.threshold_control = threshold_control - self.threshold_control_list = [ - slider for slider in threshold_control.vbox.children - ] - - def _swap_axes_on_click(self, imagestack, image, slider): - imagestack = np.swapaxes(imagestack, 0, 1) - slider.max = imagestack.shape[0] - 1 - slider.value = 0 - image.image = imagestack[0] - return imagestack - - def _remove_high_low_intensity_on_click(self, imagestack, scale, slider): - vmin, vmax = np.percentile(imagestack, q=(0.5, 99.5)) - slider.min = vmin - slider.max = vmax - self._set_bqplot_hist_range(scale, vmin, vmax) - - def _set_bqplot_hist_range(self, scale, vmin, vmax): - scale["image"].min = vmin - scale["image"].max = vmax - - def _create_two_plots_with_two_sliders(self, imagestacks, titles): - fig1, plotted_image1, scale_image1 = self._create_bqplot_from_imagestack( - imagestacks[0], titles[0] - ) - fig2, plotted_image2, scale_image2 = self._create_bqplot_from_imagestack( - imagestacks[1], titles[1] - ) - figs = [fig1, fig2] - images = [plotted_image1, plotted_image2] - scales = [scale_image1, scale_image2] - - # slider 1 + play button - def change_image1(change): - plotted_image1.image = imagestacks[0][change.new] - - slider1 = IntSlider( - value=0, - min=0, - max=imagestacks[0].shape[0] - 1, - step=1, - ) - slider1.observe(change_image1, names="value") - - play1 = Play( - value=0, - min=0, - max=imagestacks[0].shape[0] - 1, - step=1, - interval=100, - disabled=False, - ) - jslink((play1, "value"), 
(slider1, "value")) - - # slider 2 + play button - def change_image2(change): - plotted_image2.image = imagestacks[1][change.new] - - slider2 = IntSlider( - value=0, - min=0, - max=imagestacks[1].shape[0] - 1, - step=1, - ) - slider2.observe(change_image2, names="value") - play2 = Play( - value=0, - min=0, - max=imagestacks[1].shape[0] - 1, - step=1, - interval=100, - disabled=False, - ) - jslink((play2, "value"), (slider2, "value")) - - sliders = [slider1, slider2] - plays = [play1, play2] - - return figs, images, scales, sliders, plays - - def _create_two_plots_with_single_slider(self, imagestacks, titles): - - fig1, plotted_image1, scale_image1 = self._create_bqplot_from_imagestack( - imagestacks[0], titles[0] - ) - fig2, plotted_image2, scale_image2 = self._create_bqplot_from_imagestack( - imagestacks[1], titles[1] - ) - figs = [fig1, fig2] - images = [plotted_image1, plotted_image2] - scales = [scale_image1, scale_image2] - - def change_image(change): - plotted_image1.image = imagestacks[0][change.new] - plotted_image2.image = imagestacks[1][change.new] - - slider = IntSlider( - value=0, - min=0, - max=imagestacks[0].shape[0] - 1, - step=1, - ) - slider.observe(change_image, names="value") - - play = Play( - value=0, - min=0, - max=imagestacks[0].shape[0] - 1, - step=1, - interval=100, - disabled=False, - ) - jslink((play, "value"), (slider, "value")) - - return figs, images, scales, slider, play - - def _create_bqplot_from_imagestack(self, imagestack, title="title"): - scale_x = bq.LinearScale(min=0, max=1) - scale_y = bq.LinearScale(min=1, max=0) - scale_x_y = {"x": scale_x, "y": scale_y} - fig = bq.Figure(scales=scale_x_y) - projection_num = 0 - scale_image = { - "x": scale_x, - "y": scale_y, - "image": bq.ColorScale( - min=float(np.min(imagestack)), - max=float(np.max(imagestack)), - scheme="viridis", - ), - } - plotted_image = ImageGL( - image=imagestack[projection_num], - scales=scale_image, - ) - fig.marks = (plotted_image,) - fig.layout.width = "550px" 
- fig.layout.height = "550px" - fig.title = f"{title}" - - return fig, plotted_image, scale_image - - def save_prj_animation(self): - """ - Creates button to save animation. Not yet implemented. - """ - - def save_animation_on_click(click): - os.chdir(self.Import.fpath) - self.save_prj_animation_button.button_style = "info" - self.save_prj_animation_button.icon = "fas fa-cog fa-spin fa-lg" - self.save_prj_animation_button.description = "Making a movie." - anim = self.threshold_control.save_animation( - "projections_animation.mp4", - self.slicer_with_hist_fig, - "Angle", - interval=35, - ) - self.save_prj_animation_button.button_style = "success" - self.save_prj_animation_button.icon = "square-check" - self.save_prj_animation_button.description = ( - "Click again to save another animation." - ) - - self.save_prj_animation_button = Button( - description="Click to save this animation", layout=Layout(width="auto") - ) - - self.save_prj_animation_button.on_click(save_animation_on_click) - - -class Prep: - def __init__(self, Import): - - self.tomo = Import.tomo - self.dark = None - self.flat = None - self.darkfc = FileChooser() - self.darkfc.register_callback(self.set_fpath_dark) - self.flatfc = FileChooser() - self.flatfc.register_callback(self.set_fpath_flat) - self.fpathdark = None - self.fnamedark = None - self.fpathflat = None - self.fnameflat = None - self.rotate = Import.metadata["rotate"] - self.set_metadata_dark() - self.set_metadata_flat() - - def set_metadata_dark(self): - self.darkmetadata = { - "fpath": self.fpathdark, - "fname": self.fnamedark, - "opts": {"rotate": self.rotate}, - } - - def set_metadata_flat(self): - self.flatmetadata = { - "fpath": self.fpathflat, - "fname": self.fnameflat, - "opts": {"rotate": self.rotate}, - } - - def set_fpath_dark(self): - self.fpathdark = self.darkfc.selected_path - self.fnamedark = self.darkfc.selected_filename - self.set_metadata() - - def set_fpath_flat(self): - self.fpathflat = self.flatfc.selected_path - 
self.fnameflat = self.flatfc.selected_filename - self.set_metadata() - - def normalize(self, rm_zeros_nans=True): - tomo_norm = tomopy.prep.normalize.normalize( - self.tomo.prj_imgs, self.flat.prj_imgs, self.dark.prj_imgs - ) - tomo_norm = td.TomoData(prj_imgs=prj_imgs, raw="No") - tomo_norm_mlog = tomopy.prep.normalize.minus_log(tomo_norm) - tomo_norm_mlog = td.TomoData(prj_imgs=tomoNormMLogprj_imgs, raw="No") - if rm_zeros_nans == True: - tomo_norm_mlog.prj_imgs = tomopy.misc.corr.remove_nan( - tomo_norm_mlog.prj_imgs, val=0.0 - ) - tomo_norm_mlog.prj_imgs[tomo_norm_mlog.prj_imgs == np.inf] = 0 - self.tomo = tomo_norm_mlog - - -class Center: - """ - Class for creating a tab to help find the center of rotation. See examples - for more information on center finding. - - Attributes - ---------- - Import : `Import` - Needs an import object to be constructed. - current_center : double - Current center of rotation. Updated when center_textbox is updated. - - TODO: this should be linked to both `Align` and `Recon`. - center_guess : double - Guess value for center of rotation for automatic alignment (`~tomopy.recon.rotation.find_center`). - index_to_try : int - Index to try out when automatically (entropy) or manually trying to - find the center of rotation. - search_step : double - Step size between centers (see `tomopy.recon.rotation.write_center` or - `tomopyui.backend.util.center`). - search_range : double - Will search from [center_guess - search_range] to [center_guess + search range] - in steps of search_step. - num_iter : int - Number of iterations to use in center reconstruction. - algorithm : str - Algorithm to use in the reconstruction. Chosen from dropdown list. - filter : str - Filter to be used. Only works with fbp and gridrec. If you choose - another algorith, this will be ignored. 
- - """ - - def __init__(self, Import): - - self.Import = Import - self.current_center = self.Import.prj_range_x[1] / 2 - self.center_guess = None - self.index_to_try = None - self.search_step = 0.5 - self.search_range = 5 - self.cen_range = None - self.num_iter = 1 - self.algorithm = "gridrec" - self.filter = "parzen" - self.metadata = {} - self.center_plotter = Plotter(Import=self.Import) - self._init_widgets() - self._set_observes() - self.make_tab() - - def set_metadata(self): - """ - Sets `Center` metadata. - """ - - self.metadata["center"] = self.current_center - self.metadata["center_guess"] = self.center_guess - self.metadata["index_to_try"] = self.index_to_try - self.metadata["search_step"] = self.search_step - self.metadata["search_range"] = self.search_range - self.metadata["cen_range"] = self.cen_range - self.metadata["num_iter"] = self.num_iter - self.metadata["algorithm"] = self.algorithm - self.metadata["filter"] = self.filter - - def _init_widgets(self): - - self.center_textbox = FloatText( - description="Center: ", - disabled=False, - style=extend_description_style, - ) - self.load_rough_center = Button( - description="Click to load rough center from imported data.", - disabled=False, - button_style="info", - tooltip="Loads the half-way pixel point for the center.", - icon="", - layout=Layout(width="auto", justify_content="center"), - ) - self.center_guess_textbox = FloatText( - description="Guess for center: ", - disabled=False, - style=extend_description_style, - ) - self.find_center_button = Button( - description="Click to automatically find center (image entropy).", - disabled=False, - button_style="info", - tooltip="", - icon="", - layout=Layout(width="auto", justify_content="center"), - ) - self.index_to_try_textbox = IntText( - description="Slice to use for auto:", - disabled=False, - style=extend_description_style, - placeholder="Default is 1/2*y pixels", - ) - self.num_iter_textbox = FloatText( - description="Number of iterations: ", - 
disabled=False, - style=extend_description_style, - value=self.num_iter, - ) - self.search_range_textbox = IntText( - description="Search range around center:", - disabled=False, - style=extend_description_style, - value=self.search_range, - ) - self.search_step_textbox = FloatText( - description="Step size in search range: ", - disabled=False, - style=extend_description_style, - value=self.search_step, - ) - self.algorithms_dropdown = Dropdown( - options=[key for key in tomopy_recon_algorithm_kwargs], - value=self.algorithm, - description="Algorithm:", - ) - self.filters_dropdown = Dropdown( - options=[key for key in tomopy_filter_names], - value=self.filter, - description="Algorithm:", - ) - self.find_center_vo_button = Button( - description="Click to automatically find center (Vo).", - disabled=False, - button_style="info", - tooltip="Vo's method", - icon="", - layout=Layout(width="auto", justify_content="center"), - ) - self.find_center_manual_button = Button( - description="Click to find center by plotting.", - disabled=False, - button_style="info", - tooltip="Start center-finding reconstruction with this button.", - icon="", - layout=Layout(width="auto", justify_content="center"), - ) - - def _center_update(self, change): - self.current_center = change.new - self.set_metadata() - - def _center_guess_update(self, change): - self.center_guess = change.new - self.set_metadata() - - def _load_rough_center_onclick(self, change): - self.center_guess = self.Import.prj_range_x[1] / 2 - self.current_center = self.center_guess - self.center_textbox.value = self.center_guess - self.center_guess_textbox.value = self.center_guess - self.index_to_try_textbox.value = int(np.around(self.Import.prj_range_y[1] / 2)) - self.index_to_try = self.index_to_try_textbox.value - self.set_metadata() - - def _index_to_try_update(self, change): - self.index_to_try = change.new - self.set_metadata() - - def _num_iter_update(self, change): - self.num_iter = change.new - self.set_metadata() 
- - def _search_range_update(self, change): - self.search_range = change.new - self.set_metadata() - - def _search_step_update(self, change): - self.search_step = change.new - self.set_metadata() - - def _update_algorithm(self, change): - self.algorithm = change.new - self.set_metadata() - - def _update_filters(self, change): - self.filter = change.new - self.set_metadata() - - def _center_textbox_slider_update(self, change): - self.center_textbox.value = self.cen_range[change.new] - self.current_center = self.center_textbox.value - self.set_metadata() - - def find_center_on_click(self, change): - """ - Callback to button for attempting to find center automatically using - `tomopy.recon.rotation.find_center`. Takes index_to_try and center_guess. - This method has worked better for me, if I use a good index_to_try - and center_guess. - """ - self.find_center_button.button_style = "info" - self.find_center_button.icon = "fa-spin fa-cog fa-lg" - self.find_center_button.description = "Importing data..." - try: - tomo = td.TomoData(metadata=self.Import.metadata) - self.Import.log.info("Imported tomo") - self.Import.log.info("Finding center...") - self.Import.log.info(f"Using index: {self.index_to_try}") - except: - self.find_center_button.description = ( - "Please choose a file first. Try again after you do that." - ) - self.find_center_button.button_style = "warning" - self.find_center_button.icon = "exclamation-triangle" - try: - self.find_center_button.description = "Finding center..." - self.find_center_button.button_style = "info" - self.current_center = find_center( - tomo.prj_imgs, - tomo.theta, - ratio=0.9, - ind=self.index_to_try, - init=self.center_guess, - ) - self.center_textbox.value = self.current_center - self.Import.log.info(f"Found center. {self.current_center}") - self.find_center_button.description = "Found center." 
- self.find_center_button.icon = "fa-check-square" - self.find_center_button.button_style = "success" - except: - self.find_center_button.description = ( - "Something went wrong with finding center." - ) - self.find_center_button.icon = "exclamation-triangle" - self.find_center_button.button_style = "warning" - - def find_center_vo_on_click(self, change): - """ - Callback to button for attempting to find center automatically using - `tomopy.recon.rotation.find_center_vo`. Note: this method has not worked - well for me. - """ - self.find_center_vo_button.button_style = "info" - self.find_center_vo_button.icon = "fa-spin fa-cog fa-lg" - self.find_center_vo_button.description = "Importing data..." - try: - tomo = td.TomoData(metadata=self.Import.metadata) - self.Import.log.info("Imported tomo") - self.Import.log.info("Finding center...") - self.Import.log.info(f"Using index: {self.index_to_try}") - except: - self.find_center_vo_button.description = ( - "Please choose a file first. Try again after you do that." - ) - self.find_center_vo_button.button_style = "warning" - self.find_center_vo_button.icon = "exclamation-triangle" - try: - self.find_center_vo_button.description = "Finding center using Vo method..." - self.find_center_vo_button.button_style = "info" - self.current_center = find_center_vo(tomo.prj_imgs, ncore=1) - self.center_textbox.value = self.current_center - self.Import.log.info(f"Found center. {self.current_center}") - self.find_center_vo_button.description = "Found center." - self.find_center_vo_button.icon = "fa-check-square" - self.find_center_vo_button.button_style = "success" - except: - self.find_center_vo_button.description = ( - "Something went wrong with finding center." 
- ) - self.find_center_vo_button.icon = "exclamation-triangle" - self.find_center_vo_button.button_style = "warning" - - def find_center_manual_on_click(self, change): - """ - Reconstructs at various centers when you click the button, and plots - the results with a slider so one can view. TODO: see X example. - Uses search_range, search_step, center_guess. - Creates a :doc:`hyperslicer ` + - :doc:`histogram ` plot - """ - self.find_center_manual_button.button_style = "info" - self.find_center_manual_button.icon = "fas fa-cog fa-spin fa-lg" - self.find_center_manual_button.description = "Starting reconstruction." - - # TODO: for memory, add only desired slice - tomo = td.TomoData(metadata=self.Import.metadata) - theta = tomo.theta - cen_range = [ - self.center_guess - self.search_range, - self.center_guess + self.search_range, - self.search_step, - ] - - # reconstruct, but also pull the centers used out to map to center - # textbox - rec, self.cen_range = write_center( - tomo.prj_imgs, - theta, - cen_range=cen_range, - ind=self.index_to_try, - mask=True, - algorithm=self.algorithm, - filter_name=self.filter, - num_iter=self.num_iter, - ) - self.center_plotter.create_slicer_with_hist( - plot_type="center", imagestack=rec, Center=self - ) - - # this maps the threshold_control slider to center texbox - self.center_plotter.threshold_control.vbox.children[0].children[1].observe( - self._center_textbox_slider_update, names="value" - ) - self.find_center_manual_button.button_style = "success" - self.find_center_manual_button.icon = "fa-check-square" - self.find_center_manual_button.description = "Finished reconstruction." 
- - # Make VBox instantiated outside into the plot - self.center_tab.children[2].children[0].children[2].children = [ - HBox( - [self.center_plotter.slicer_with_hist_fig.canvas], - layout=Layout(justify_content="center"), - ), - HBox( - [ - HBox( - [self.center_plotter.threshold_control_list[0]], - layout=Layout(align_items="center"), - ), - VBox(self.center_plotter.threshold_control_list[1::]), - ], - layout=Layout(justify_content="center"), - ), - ] - self.manual_center_accordion = Accordion( - children=[self.manual_center_vbox], - selected_index=None, - titles=("Find center through plotting",), - ) - - def _set_observes(self): - self.center_textbox.observe(self._center_update, names="value") - self.center_guess_textbox.observe(self._center_guess_update, names="value") - self.load_rough_center.on_click(self._load_rough_center_onclick) - self.index_to_try_textbox.observe(self._index_to_try_update, names="value") - self.num_iter_textbox.observe(self._num_iter_update, names="value") - self.search_range_textbox.observe(self._search_range_update, names="value") - self.search_step_textbox.observe(self._search_step_update, names="value") - self.algorithms_dropdown.observe(self._update_algorithm, names="value") - self.filters_dropdown.observe(self._update_filters, names="value") - self.find_center_button.on_click(self.find_center_on_click) - self.find_center_vo_button.on_click(self.find_center_vo_on_click) - self.find_center_manual_button.on_click(self.find_center_manual_on_click) - - def make_tab(self): - """ - Function to create a Center object's :doc:`Tab `. TODO: make the tab look better. 
- """ - - # Accordion to find center automatically - self.automatic_center_vbox = VBox( - [ - HBox([self.find_center_button, self.find_center_vo_button]), - HBox( - [ - self.center_guess_textbox, - self.index_to_try_textbox, - ] - ), - ] - ) - self.automatic_center_accordion = Accordion( - children=[self.automatic_center_vbox], - selected_index=None, - titles=("Find center automatically",), - ) - - # Accordion to find center manually - self.manual_center_vbox = VBox( - [ - self.find_center_manual_button, - HBox( - [ - self.center_guess_textbox, - self.index_to_try_textbox, - self.num_iter_textbox, - self.search_range_textbox, - self.search_step_textbox, - self.algorithms_dropdown, - self.filters_dropdown, - ], - layout=Layout( - display="flex", - flex_flow="row wrap", - align_content="center", - justify_content="flex-start", - ), - ), - VBox( - [], layout=Layout(justify_content="center", align_content="center") - ), - ] - ) - - self.manual_center_accordion = Accordion( - children=[self.manual_center_vbox], - selected_index=None, - titles=("Find center through plotting",), - ) - - self.center_tab = VBox( - [ - HBox([self.center_textbox, self.load_rough_center]), - self.automatic_center_accordion, - self.manual_center_accordion, - ] - ) - - -class Align: - """ - Class to set automatic alignment attributes in the Alignment dashboard tab. - On initialization, this class will create a tab. - Features: - - - Plotting the projection images in an - :doc:`hyperslicer ` + - :doc:`histogram `. - - Selecting the plot range through the plot. - - Selecting the plot range through range sliders. - - Reconstruction method selection. Each :doc:`Checkbox ` that is clicked - will do a reconstruction using that alignment technique, and save into a - folder with a datetime stamp. - - Save options, including tomography images before, tomography images after - and last reconstruction performed by the alignment. 
- - Attributes - ---------- - Import : `Import` - Needs an import object to be constructed. - prj_shape : (Z, Y, X) - Shape pulled from image, imported in Import tab. After choosing a file - in Import, and choosing radio button for turning on the tab, this will load the - projection range into the sliders. - downsample : boolean - Determines whether or not to downsample the data before alignment. - Clicking this box will open another box to choose the downsample factor - (how much you want to reduce the datasize by). This will downsample your - data by skimage.scale TODO: check to make sure this is right. - downsample_factor : double - The factor used to scale the data going into the alignment routine. - The data will be downsampled and then the alignment routine will attempt - to correct for misalignment. After completing the alignment, the original - (not downsampled) projection images will be shifted based on the shifts - found in the downsampled data. - num_iter : int - Number of alignment iterations to perform. - center : double - Center of rotation used for alignment. - upsample_factor : int - During alignment, your data can be upsampled for sub-pixel registration - during phase cross correlation. TODO: link to paper on this. - extra_options : str - This option will add the extra-options keyword argument to the tomopy - astra wrapper. TODO: See this page: X. TODO: need a list of extra options - keyword arguments. - num_batches : int - Since the data is broken up into chunks for the alignment process to - take place on the GPU, this attribute will cut the data into chunks - in the following way: - - .. 
code-block:: python - - # Jupyter - # Cell 1 - import numpy as np - from tomopyui.widgets.meta import Import - a = Import() - a - # choose file with FileChooser in output of this cell - - # Cell 2 - a.make_tomo() - print(a.tomo.prj_imgs.shape) - # Output : (100, 100, 100) - - # Cell 3 - num_batches = 5 - b = np.array_split(a.tomo.prj_imgs, num_batches, axis=0) - print(len(b)) - # Output : 5 - print(b[0].shape) - # Output : (20, 100, 100) - - paddingX, paddingY : int - Padding added to the projection images. - partial : boolean - If True, will use a partial dataset. The plot range sliders set the - values. - metadata : dict - Metadata from the alignment options. This passes into - tomopyui.backend.tomoalign.TomoAlign. - - """ - - def __init__(self, Import, Center): - self._init_attributes(Import, Center) - self.save_opts_list = ["tomo_after", "tomo_before", "recon", "tiff", "npy"] - self.widget_type = "Align" - init_widgets(self) # initialization of many widgets/some attributes - self.set_metadata() - self._set_metadata_obj_specific() - self._set_observes() - self._set_observes_obj_specific() - self.make_tab() - - def _init_attributes(self, Import, Center): - self.Import = Import - self.Center = Center - self.prj_shape = Import.prj_shape - self.wd = Import.wd - self.log_handler, self.log = Import.log_handler, Import.log - self.downsample = False - self.downsample_factor = 0.5 - self.num_iter = 1 - self.center = Center.current_center - self.upsample_factor = 50 - self.extra_options = {} - self.num_batches = 20 - self.prj_range_x = (0, 10) - self.prj_range_y = (0, 10) - self.paddingX = 10 - self.paddingY = 10 - self.partial = False - self.tomopy_methods_list = [key for key in tomopy_recon_algorithm_kwargs] - self.tomopy_methods_list.remove("gridrec") - self.tomopy_methods_list.remove("fbp") - self.astra_cuda_methods_list = [ - key for key in astra_cuda_recon_algorithm_kwargs - ] - self.prj_plotter = Plotter(self.Import) - self.metadata = {} - self.metadata["opts"] = 
{} - self.run_list = [] - - def set_metadata(self): - self.metadata["opts"]["downsample"] = self.downsample - self.metadata["opts"]["downsample_factor"] = self.downsample_factor - self.metadata["opts"]["num_iter"] = self.num_iter - self.metadata["opts"]["center"] = self.center - self.metadata["opts"]["num_batches"] = self.num_batches - self.metadata["opts"]["pad"] = ( - self.paddingX, - self.paddingY, - ) - self.metadata["opts"]["extra_options"] = self.extra_options - self.metadata["methods"] = self.methods_opts - self.metadata["save_opts"] = self.save_opts - self.metadata["prj_range_x"] = self.prj_range_x - self.metadata["prj_range_y"] = self.prj_range_y - self.metadata["partial"] = self.partial - - def _set_metadata_obj_specific(self): - self.metadata["opts"]["upsample_factor"] = self.upsample_factor - - def _set_prj_ranges_full(self): - self.prj_range_x = (0, self.prj_shape[2] - 1) - self.prj_range_y = (0, self.prj_shape[1] - 1) - self.prj_range_z = (0, self.prj_shape[0] - 1) - self.prj_range_x_slider.max = self.prj_shape[2] - 1 - self.prj_range_y_slider.max = self.prj_shape[1] - 1 - self.prj_range_x_slider.value = self.prj_range_x - self.prj_range_y_slider.value = self.prj_range_y - self.set_metadata() - - def load_metadata_align(self): - self.metadata = load_metadata(self.Import.fpath_align, self.Import.fname_align) - - def _set_attributes_from_metadata(self): - self.downsample = self.metadata["opts"]["downsample"] - self.downsample_factor = self.metadata["opts"]["downsample_factor"] - self.num_iter = self.metadata["opts"]["num_iter"] - self.center = self.metadata["opts"]["center"] - self.num_batches = self.metadata["opts"]["num_batches"] - (self.paddingX, self.paddingY) = self.metadata["opts"]["pad"] - self.extra_options = self.metadata["opts"]["extra_options"] - self.methods_opts = self.metadata["methods"] - self.save_opts = self.metadata["save_opts"] - self.prj_range_x = self.metadata["prj_range_x"] - self.prj_range_y = self.metadata["prj_range_y"] - 
self.partial = self.metadata["partial"] - - def _set_attributes_from_metadata_obj_specific(self): - self.upsample_factor = self.metadata["opts"]["upsample_factor"] - - # -- Radio to turn on tab --------------------------------------------- - def _activate_tab(self, change): - if change.new == 0: - self.center = self.Center.current_center - self.center_textbox.value = self.Center.current_center - self.set_metadata() - self.radio_fulldataset.disabled = False - self.load_metadata_button.disabled = False - self.start_button.disabled = False - self.prj_shape = self.Import.prj_shape - self.plotter_accordion.selected_index = 0 - self.save_options_accordion.selected_index = 0 - self.options_accordion.selected_index = 0 - self.methods_accordion.selected_index = 0 - self._set_prj_ranges_full() - self.log.info("Activated alignment.") - elif change.new == 1: - self.radio_fulldataset.disabled = True - self.prj_range_x_slider.disabled = True - self.prj_range_y_slider.disabled = True - self.load_metadata_button.disabled = True - self.plotter_accordion.selected_index = None - self.start_button.disabled = True - self.save_options_accordion.selected_index = None - self.options_accordion.selected_index = None - self.methods_accordion.selected_index = None - self.log.info("Deactivated alignment.") - - # -- Load metadata button --------------------------------------------- - def _load_metadata_all_on_click(self, change): - self.load_metadata_button.button_style = "info" - self.load_metadata_button.icon = "fas fa-cog fa-spin fa-lg" - self.load_metadata_button.description = "Importing metadata." - self.load_metadata_align() - self._set_attributes_from_metadata() - self = _set_widgets_from_load_metadata(self) - self._set_observes() - self._set_observes_obj_specific() - self.load_metadata_button.button_style = "success" - self.load_metadata_button.icon = "fa-check-square" - self.load_metadata_button.description = "Finished importing metadata." 
- - # -- Radio to turn on partial dataset --------------------------------- - def _activate_full_partial(self, change): - if change.new == 1: - self.partial = True - self.prj_range_x_slider.disabled = False - self.prj_range_y_slider.disabled = False - self.set_metadata() - - elif change.new == 0: - self.partial = False - self._set_prj_ranges_full() - self.prj_range_x_slider.disabled = True - self.prj_range_y_slider.disabled = True - self.set_metadata() - - # -- Plot projections button ------------------------------------------ - def _plot_prjs_on_click(self, change): - self.plot_prj_images_button.button_style = "info" - self.plot_prj_images_button.icon = "fas fa-cog fa-spin fa-lg" - self.plot_prj_images_button.description = "Importing data." - self.prj_plotter.create_slicer_with_hist() - self.plot_prj_images_button.button_style = "success" - self.plot_prj_images_button.icon = "fa-check-square" - self.plot_prj_images_button.description = "Finished Import." - self.prj_plotter.set_range_button.description = ( - "Click to set projection range to current plot range" - ) - self._make_prj_plot() - - def _make_prj_plot(self): - # Make VBox inside alignment tab into the plot - self.tab.children[1].children[0].children[1].children = [ - HBox( - [self.prj_plotter.slicer_with_hist_fig.canvas], - layout=Layout(justify_content="center"), - ), - HBox( - [ - HBox( - [self.prj_plotter.threshold_control_list[0]], - layout=Layout(align_items="center"), - ), - VBox(self.prj_plotter.threshold_control_list[1::]), - ] - ), - ] - - # -- Button to start alignment ---------------------------------------- - def set_options_and_run(self, change): - change.button_style = "info" - change.icon = "fas fa-cog fa-spin fa-lg" - change.description = ( - "Setting options and loading data into alignment algorithm." 
- ) - try: - from tomopyui.backend.tomoalign import TomoAlign - - a = TomoAlign(self) - change.button_style = "success" - change.icon = "fa-check-square" - change.description = "Finished alignment." - except: - change.button_style = "warning" - change.icon = "exclamation-triangle" - change.description = "Something went wrong." - - # -- Sliders ---------------------------------------------------------- - @helpers.debounce(0.2) - def _prj_range_x_update(self, change): - self.prj_range_x = change.new - self.set_metadata() - - @helpers.debounce(0.2) - def _prj_range_y_update(self, change): - self.prj_range_y = change.new - self.set_metadata() - - # -- Options ---------------------------------------------------------- - - # Number of iterations - def _update_num_iter(self, change): - self.num_iter = change.new - self.progress_total.max = change.new - self.set_metadata() - - # Center of rotation - def _update_center_textbox(self, change): - self.center = change.new - self.set_metadata() - - # Downsampling - def _downsample_turn_on(self, change): - if change.new == True: - self.downsample = True - self.downsample_factor = self.downsample_factor_textbox.value - self.downsample_factor_textbox.disabled = False - self.set_metadata() - if change.new == False: - self.downsample = False - self.downsample_factor = 1 - self.downsample_factor_textbox.value = 1 - self.downsample_factor_textbox.disabled = True - self.set_metadata() - - def _update_downsample_factor_dict(self, change): - self.downsample_factor = change.new - self.set_metadata() - - # Upsampling - def _update_upsample_factor(self, change): - self.upsample_factor = change.new - self._set_metadata_obj_specific() - - # Batch size - def _update_num_batches(self, change): - self.num_batches = change.new - self.progress_phase_cross_corr.max = change.new - self.progress_shifting.max = change.new - self.progress_reprj.max = change.new - self.set_metadata() - - # X Padding - def _update_x_padding(self, change): - self.paddingX 
= change.new - self.set_metadata() - - # Y Padding - def _update_y_padding(self, change): - self.paddingY = change.new - self.set_metadata() - - # Extra options - def _update_extra_options(self, change): - self.extra_options = change.new - self.set_metadata() - - def _set_observes(self): - - # -- Radio to turn on tab --------------------------------------------- - self.radio_tab.observe(self._activate_tab, names="index") - - # -- Load metadata button --------------------------------------------- - self.load_metadata_button.on_click(self._load_metadata_all_on_click) - - # -- Plot projections button ------------------------------------------ - self.plot_prj_images_button.on_click(self._plot_prjs_on_click) - - # -- Radio to turn on partial dataset --------------------------------- - self.radio_fulldataset.observe(self._activate_full_partial, names="index") - - # -- Sliders ---------------------------------------------------------- - self.prj_range_x_slider.observe(self._prj_range_x_update, names="value") - - self.prj_range_y_slider.observe(self._prj_range_y_update, names="value") - # -- Options ---------------------------------------------------------- - - # Center - self.center_textbox.observe(self._update_center_textbox, names="value") - - # Downsampling - self.downsample_checkbox.observe(self._downsample_turn_on) - self.downsample_factor_textbox.observe( - self._update_downsample_factor_dict, names="value" - ) - - # X Padding - self.paddingX_textbox.observe(self._update_x_padding, names="value") - - # Y Padding - self.paddingY_textbox.observe(self._update_y_padding, names="value") - - # Extra options - self.extra_options_textbox.observe(self._update_extra_options, names="value") - - def _set_observes_obj_specific(self): - - # -- Set observes only for alignment ---------------------------------- - self.num_iterations_textbox.observe(self._update_num_iter, names="value") - self.num_batches_textbox.observe(self._update_num_batches, names="value") - 
self.upsample_factor_textbox.observe( - self._update_upsample_factor, names="value" - ) - self.start_button.on_click(self.set_options_and_run) - - def make_tab(self): - """ - Creates an alignment tab. - """ - - # -- Saving ----------------------------------------------------------- - save_hbox = HBox( - self.save_opts_checkboxes, - layout=Layout(flex_wrap="wrap", justify_content="space-between"), - ) - - self.save_options_accordion = Accordion( - children=[save_hbox], - selected_index=None, - titles=("Save Options",), - ) - - # -- Methods ---------------------------------------------------------- - tomopy_methods_hbox = HBox( - [ - Label("Tomopy:", layout=Layout(width="200px", align_content="center")), - HBox( - self.tomopy_methods_checkboxes, - layout=widgets.Layout(flex_flow="row wrap"), - ), - ] - ) - astra_methods_hbox = HBox( - [ - Label("Astra:", layout=Layout(width="100px", align_content="center")), - HBox( - self.astra_cuda_methods_checkboxes, - layout=widgets.Layout(flex_flow="row wrap"), - ), - ] - ) - - recon_method_box = VBox( - [tomopy_methods_hbox, astra_methods_hbox], - layout=widgets.Layout(flex_flow="row wrap"), - ) - self.methods_accordion = Accordion( - children=[recon_method_box], selected_index=None, titles=("Methods",) - ) - - # -- Box organization ------------------------------------------------- - - pixel_range_slider_vb = VBox( - [ - self.prj_range_x_slider, - self.prj_range_y_slider, - ], - layout=Layout(width="40%"), - justify_content="center", - align_items="space-between", - ) - - top_of_box_hb = HBox( - [ - self.radio_description, - self.radio_tab, - self.partial_radio_description, - self.radio_fulldataset, - pixel_range_slider_vb, - ], - layout=Layout( - width="auto", - justify_content="center", - align_items="center", - flex="flex-grow", - ), - ) - start_button_hb = HBox( - [self.start_button], layout=Layout(width="auto", justify_content="center") - ) - - self.options_accordion = Accordion( - children=[ - HBox( - [ - 
self.num_iterations_textbox, - self.center_textbox, - self.upsample_factor_textbox, - self.num_batches_textbox, - self.paddingX_textbox, - self.paddingY_textbox, - self.downsample_checkbox, - self.downsample_factor_textbox, - self.extra_options_textbox, - ], - layout=Layout( - flex_flow="row wrap", justify_content="space-between" - ), - ), - ], - selected_index=None, - titles=("Alignment Options",), - ) - - progress_hbox = HBox( - [ - self.progress_total, - self.progress_reprj, - self.progress_phase_cross_corr, - self.progress_shifting, - ] - ) - - self.tab = VBox( - children=[ - top_of_box_hb, - self.plotter_accordion, - self.load_metadata_button, - self.methods_accordion, - self.save_options_accordion, - self.options_accordion, - start_button_hb, - progress_hbox, - VBox( - [self.plot_output1, self.plot_output2], - ), - ] - ) - - -class Recon(Align): - def __init__(self, Import, Center): - self.Center = Center - super()._init_attributes(Import, Center) - self.save_opts_list = ["tomo_before", "recon", "tiff", "npy"] - self.widget_type = "Recon" - init_widgets(self) # initialization of many widgets/some attributes - super().set_metadata() - super()._set_observes() - self._set_observes_obj_specific() - self.make_tab() - - # -- Observe functions for reconstruction --------------------------------- - - # Start button - def _set_options_and_run(self, change): - change.icon = "fas fa-cog fa-spin fa-lg" - change.description = ( - "Setting options and loading data into reconstruction algorithm(s)." - ) - try: - from tomopyui.backend.tomorecon import TomoRecon - - a = TomoRecon(self) - change.button_style = "success" - change.icon = "fa-check-square" - change.description = "Finished reconstruction." - except: - change.button_style = "warning" - change.icon = "exclamation-triangle" - change.description = "Something went wrong." 
- - # Batch size - def _update_num_batches(self, change): - self.num_batches = change.new - self.set_metadata() - - # Number of iterations - def _update_num_iter(self, change): - self.num_iter = change.new - self.set_metadata() - - def _set_observes_obj_specific(self): - - self.start_button.on_click(self._set_options_and_run) - self.num_batches_textbox.observe(self._update_num_batches, names="value") - self.num_iterations_textbox.observe(self._update_num_iter, names="value") - - # -- Create recon tab ----------------------------------------------------- - def make_tab(self): - - # -- Saving ----------------------------------------------------------- - save_hbox = HBox( - self.save_opts_checkboxes, - layout=Layout(flex_flow="row wrap", justify_content="space-between"), - ) - - self.save_options_accordion = Accordion( - children=[save_hbox], - selected_index=None, - titles=("Save Options",), - ) - - # -- Methods ---------------------------------------------------------- - tomopy_methods_hbox = HBox( - [ - Label("Tomopy:", layout=Layout(width="200px", align_content="center")), - HBox( - self.tomopy_methods_checkboxes, - layout=widgets.Layout(flex_flow="row wrap"), - ), - ], - layout=Layout(align_content="center"), - ) - astra_methods_hbox = HBox( - [ - Label("Astra:", layout=Layout(width="100px", align_content="center")), - HBox( - self.astra_cuda_methods_checkboxes, - layout=widgets.Layout(flex_flow="row wrap"), - ), - ], - layout=Layout(align_content="center"), - ) - - recon_method_box = VBox( - [tomopy_methods_hbox, astra_methods_hbox], - layout=widgets.Layout(flex_flow="row wrap"), - ) - self.methods_accordion = Accordion( - children=[recon_method_box], selected_index=None, titles=("Methods",) - ) - - # -- Box organization ------------------------------------------------- - - pixel_range_slider_vb = VBox( - [ - self.prj_range_x_slider, - self.prj_range_y_slider, - ], - layout=Layout(width="30%"), - justify_content="center", - align_items="space-between", - ) - - 
top_of_box_hb = HBox( - [ - self.radio_description, - self.radio_tab, - self.partial_radio_description, - self.radio_fulldataset, - pixel_range_slider_vb, - ], - layout=Layout( - width="auto", - justify_content="center", - align_items="center", - flex="flex-grow", - ), - ) - start_button_hb = HBox( - [self.start_button], layout=Layout(width="auto", justify_content="center") - ) - - self.options_accordion = Accordion( - children=[ - HBox( - [ - self.num_iterations_textbox, - self.center_textbox, - self.num_batches_textbox, - self.paddingX_textbox, - self.paddingY_textbox, - self.downsample_checkbox, - self.downsample_factor_textbox, - self.extra_options_textbox, - ], - layout=Layout(flex_flow="row wrap"), - ), - ], - selected_index=None, - titles=("Options",), - ) - - self.tab = VBox( - children=[ - top_of_box_hb, - self.plotter_accordion, - self.methods_accordion, - self.save_options_accordion, - self.options_accordion, - start_button_hb, - ] - ) - - -class DataExplorerTab: - def __init__( - self, - Align, - Recon, - ): - self.align_de = DataExplorer(Align) - self.recon_de = DataExplorer(Recon) - self.fb_de = DataExplorer() - - def create_data_explorer_tab(self): - self.recent_alignment_accordion = Accordion( - children=[self.align_de.data_plotter], - selected_index=None, - titles=("Plot Recent Alignments",), - ) - self.recent_recon_accordion = Accordion( - children=[self.recon_de.data_plotter], - selected_index=None, - titles=("Plot Recent Reconstructions",), - ) - self.analysis_browser_accordion = Accordion( - children=[self.fb_de.data_plotter], - selected_index=None, - titles=("Plot Any Analysis",), - ) - - self.tab = VBox( - children=[ - self.analysis_browser_accordion, - self.recent_alignment_accordion, - self.recent_recon_accordion, - ] - ) - - -class DataExplorer: - def __init__(self, obj: (Align or Recon) = None): - self.figs = None - self.images = None - self.scales = None - self.projection_num_sliders = None - self.imagestacks_metadata = None - self.plays 
= None - self.imagestacks = [np.zeros((15, 100, 100)) for i in range(2)] - self.linked_stacks = False - self.obj = obj - self._init_widgets() - - def _init_widgets(self): - self.button_style = {"font_size": "22px"} - self.button_layout = Layout(width="45px", height="40px") - self.Plotter = Plotter() - self.app_output = Output() - if self.obj is not None: - if self.obj.widget_type == "Align": - self.run_list_selector = Select( - options=[], - rows=5, - description="Alignments:", - disabled=False, - style=extend_description_style, - layout=Layout(justify_content="center"), - ) - self.linked_stacks = True - self.titles = ["Before Alignment", "After Alignment"] - self.load_run_list_button = Button( - description="Load alignment list", - icon="download", - button_style="info", - layout=Layout(width="auto"), - ) - else: - self.run_list_selector = Select( - options=[], - rows=5, - description="Reconstructions:", - disabled=False, - style=extend_description_style, - layout=Layout(justify_content="center"), - ) - self.linked_stacks = False - self.titles = ["Projections", "Reconstruction"] - self.load_run_list_button = Button( - description="Load reconstruction list", - icon="download", - button_style="info", - layout=Layout(width="auto"), - ) - - self.run_list_selector.observe(self.choose_file_to_plot, names="value") - self.load_run_list_button.on_click(self._load_run_list_on_click) - self._create_plotter_run_list() - else: - self.titles = ["Projections", "Reconstruction"] - self.filebrowser = Filebrowser() - self.filebrowser.create_file_browser() - self.filebrowser.load_data_button.on_click(self.load_data_from_filebrowser) - self._create_plotter_filebrowser() - - def load_data_from_filebrowser(self, change): - metadata = {} - metadata["fpath"] = self.filebrowser.metadata["parent_fpath"] - metadata["fname"] = self.filebrowser.metadata["parent_fname"] - metadata["angle_start"] = self.filebrowser.metadata["angle_start"] - metadata["angle_end"] = 
self.filebrowser.metadata["angle_end"] - tomo = TomoData(metadata=metadata) - self.imagestacks[0] = tomo.prj_imgs - metadata["fpath"] = str(self.filebrowser.selected_method) - metadata["fname"] = str(self.filebrowser.selected_data_fname) - tomo = TomoData(metadata=metadata) - # TODO: make this agnostic to recon/tomo - self.imagestacks[1] = tomo.prj_imgs - if self.filebrowser.selected_analysis_type == "recon": - self.titles = ["Projections", "Reconstruction"] - else: - self.titles = ["Before Alignment", "After Alignment"] - self.create_figures_and_widgets() - self._create_image_app() - - def find_file_in_metadata(self, foldername): - for run in range(len(self.obj.run_list)): - if foldername in self.obj.run_list[run]: - metadata = {} - metadata["fpath"] = self.obj.run_list[run][foldername]["parent_fpath"] - metadata["fname"] = self.obj.run_list[run][foldername]["parent_fname"] - metadata["angle_start"] = self.obj.run_list[run][foldername][ - "angle_start" - ] - metadata["angle_end"] = self.obj.run_list[run][foldername]["angle_end"] - self.imagestacks[0] = TomoData(metadata=metadata).prj_imgs - metadata["fpath"] = self.obj.run_list[run][foldername]["savedir"] - if self.obj.widget_type == "Align": - metadata["fname"] = "projections_after_alignment.tif" - else: - metadata["fname"] = "recon.tif" - self.imagestacks[1] = TomoData(metadata=metadata).prj_imgs - self.create_figures_and_widgets() - self._create_image_app() - - def create_figures_and_widgets(self): - if self.linked_stacks: - ( - self.figs, - self.images, - self.scales, - self.projection_num_sliders, - self.plays, - ) = self.Plotter._create_two_plots_with_single_slider( - self.imagestacks, self.titles - ) - self.projection_num_sliders = [self.projection_num_sliders] - self.plays = [self.plays] - - else: - ( - self.figs, - self.images, - self.scales, - self.projection_num_sliders, - self.plays, - ) = self.Plotter._create_two_plots_with_two_sliders( - self.imagestacks, self.titles - ) - - self.vmin_vmax_sliders = 
self._create_vmin_vmax_sliders() - self.remove_high_low_intensity_buttons = ( - self._create_remove_high_low_intensity_buttons() - ) - self.swapaxes_buttons = self._create_swapaxes_buttons() - self.reset_button = Button( - icon="redo", style=self.button_style, layout=self.button_layout - ) - self.reset_button.on_click(self._reset_on_click) - - def _create_vmin_vmax_sliders(self): - vmin = self.imagestacks[0].min() - vmax = self.imagestacks[0].max() - slider1 = FloatRangeSlider( - description="vmin-vmax:", - min=vmin, - max=vmax, - step=(vmax - vmin) / 1000, - value=(vmin, vmax), - orientation="vertical", - ) - - def change_vmin_vmax1(change): - self.scales[0]["image"].min = change["new"][0] - self.scales[0]["image"].max = change["new"][1] - - slider1.observe(change_vmin_vmax1, names="value") - - vmin = self.imagestacks[1].min() - vmax = self.imagestacks[1].max() - slider2 = FloatRangeSlider( - description="vmin-vmax:", - min=vmin, - max=vmax, - step=(vmax - vmin) / 1000, - value=(vmin, vmax), - orientation="vertical", - ) - - def change_vmin_vmax2(change): - self.scales[1]["image"].min = change["new"][0] - self.scales[1]["image"].max = change["new"][1] - - slider2.observe(change_vmin_vmax2, names="value") - - sliders = [slider1, slider2] - - return sliders - - def _create_swapaxes_buttons(self): - def swapaxes_on_click1(change): - # defaults to going with the high/low value from - self.imagestacks[0] = self.Plotter._swap_axes_on_click( - self.imagestacks[0], - self.images[0], - self.projection_num_sliders[0], - ) - - def swapaxes_on_click2(change): - # defaults to going with the high/low value from - self.imagestacks[1] = self.Plotter._swap_axes_on_click( - self.imagestacks[1], - self.images[1], - self.projection_num_sliders[1], - ) - - button1 = Button( - icon="random", layout=self.button_layout, style=self.button_style - ) - # button1.button_style = "info" - button2 = Button( - icon="random", layout=self.button_layout, style=self.button_style - ) - # 
button2.button_style = "info" - button1.on_click(swapaxes_on_click1) - button2.on_click(swapaxes_on_click2) - buttons = [button1, button2] - return buttons - - def _create_remove_high_low_intensity_buttons(self): - """ - Parameters - ---------- - imagestack: np.ndarray - images that it will use to find vmin, vmax - this is found by - getting the 0.5 and 99.5 percentiles of the data - scale: dict - - """ - - def remove_high_low_intensity_on_click1(change): - # defaults to going with the high/low value from - self.Plotter._remove_high_low_intensity_on_click( - self.imagestacks[0], self.scales[0], self.vmin_vmax_sliders[0] - ) - - def remove_high_low_intensity_on_click2(change): - # defaults to going with the high/low value from - self.Plotter._remove_high_low_intensity_on_click( - self.imagestacks[1], self.scales[1], self.vmin_vmax_sliders[1] - ) - - button1 = Button( - icon="adjust", layout=self.button_layout, style=self.button_style - ) - button1.button_style = "info" - button2 = Button( - icon="adjust", layout=self.button_layout, style=self.button_style - ) - button2.button_style = "info" - button1.on_click(remove_high_low_intensity_on_click1) - button2.on_click(remove_high_low_intensity_on_click2) - buttons = [button1, button2] - return buttons - - def choose_file_to_plot(self, change): - self.find_file_in_metadata(change.new) - - def _load_run_list_on_click(self, change): - self.load_run_list_button.button_style = "info" - self.load_run_list_button.icon = "fas fa-cog fa-spin fa-lg" - self.load_run_list_button.description = "Importing run list." 
- # creates a list from the keys in pythonic way - # from https://stackoverflow.com/questions/11399384/extract-all-keys-from-a-list-of-dictionaries - # don't know how it works - self.run_list_selector.options = list( - set().union(*(d.keys() for d in self.obj.run_list)) - ) - self.load_run_list_button.button_style = "success" - self.load_run_list_button.icon = "fa-check-square" - self.load_run_list_button.description = "Finished importing run list." - - def _reset_on_click(self, change): - self.create_figures_and_widgets() - self._create_image_app() - - def _create_image_app(self): - left_sidebar_layout = Layout( - justify_content="space-around", align_items="center" - ) - right_sidebar_layout = Layout( - justify_content="space-around", align_items="center" - ) - footer_layout = Layout(justify_content="center") - header = None - - self.button_box1 = VBox( - [ - self.reset_button, - self.remove_high_low_intensity_buttons[0], - self.swapaxes_buttons[0], - ], - layout=left_sidebar_layout, - ) - self.button_box2 = VBox( - [ - self.reset_button, - self.remove_high_low_intensity_buttons[1], - self.swapaxes_buttons[1], - ], - layout=right_sidebar_layout, - ) - - left_sidebar = VBox( - [self.vmin_vmax_sliders[0], self.button_box1], layout=left_sidebar_layout - ) - center = HBox(self.figs, layout=Layout(justify_content="center")) - right_sidebar = VBox( - [self.vmin_vmax_sliders[1], self.button_box2], layout=right_sidebar_layout - ) - if self.linked_stacks: - footer = HBox( - self.plays + self.projection_num_sliders, layout=footer_layout - ) - else: - footer = HBox( - [ - HBox([self.plays[0], self.projection_num_sliders[0]]), - HBox([self.plays[1], self.projection_num_sliders[1]]), - ], - layout=footer_layout, - ) - self.image_app = AppLayout( - header=header, - left_sidebar=left_sidebar, - center=center, - right_sidebar=right_sidebar, - footer=footer, - pane_widths=[0.5, 5, 0.5], - pane_heights=[0, 10, "40px"], - height="auto", - ) - with self.app_output: - 
self.app_output.clear_output(wait=True) - display(self.image_app) - - def _create_plotter_run_list(self): - # self.create_figures_and_widgets() - # self._create_image_app() - self.data_plotter = VBox( - [self.load_run_list_button, self.run_list_selector, self.app_output] - ) - - def _create_plotter_filebrowser(self): - # self.create_figures_and_widgets() - # self._create_image_app() - self.data_plotter = VBox([self.filebrowser.filebrowser, self.app_output]) - - -class Filebrowser: - def __init__(self): - - # parent directory filechooser - self.orig_data_fc = FileChooser() - self.orig_data_fc.register_callback(self.update_orig_data_folder) - self.fc_label = Label("Original Data", layout=Layout(justify_content="Center")) - - # subdirectory selector - self.subdir_list = [] - self.subdir_label = Label( - "Analysis Directories", layout=Layout(justify_content="Center") - ) - self.subdir_selector = Select(options=self.subdir_list, rows=5, disabled=False) - self.subdir_selector.observe(self.populate_methods_list, names="value") - self.selected_subdir = None - - # method selector - self.methods_label = Label("Methods", layout=Layout(justify_content="Center")) - self.methods_list = [] - self.methods_selector = Select( - options=self.methods_list, rows=5, disabled=False - ) - self.methods_selector.observe(self.populate_data_list, names="value") - self.selected_method = None - - # data selector - self.data_label = Label("Data", layout=Layout(justify_content="Center")) - self.data_list = [] - self.data_selector = Select(options=self.data_list, rows=5, disabled=False) - - self.data_selector.observe(self.set_data_filename, names="value") - self.allowed_extensions = (".npy", ".tif", ".tiff") - self.selected_data_fname = None - self.selected_data_ftype = None - self.selected_analysis_type = None - - self.options_metadata_table_output = Output() - - # load data button - self.load_data_button = Button( - icon="upload", - style={"font_size": "35px"}, - button_style="info", - 
layout=Layout(width="75px", height="86px"), - ) - - def populate_subdirs_list(self): - self.subdir_list = [ - pathlib.PurePath(f) for f in os.scandir(self.root_fpath) if f.is_dir() - ] - self.subdir_list = [ - subdir.parts[-1] - for subdir in self.subdir_list - if any(x in subdir.parts[-1] for x in ("-align", "-recon")) - ] - self.subdir_selector.options = self.subdir_list - - def update_orig_data_folder(self): - self.root_fpath = self.orig_data_fc.selected_path - self.populate_subdirs_list() - self.methods_selector.options = [] - - def populate_methods_list(self, change): - self.selected_subdir = pathlib.PurePath(self.root_fpath) / change.new - self.methods_list = [ - pathlib.PurePath(f) for f in os.scandir(self.selected_subdir) if f.is_dir() - ] - self.methods_list = [ - subdir.parts[-1] - for subdir in self.methods_list - if not any(x in subdir.parts[-1] for x in ("-align", "-recon")) - ] - self.methods_selector.options = self.methods_list - - def populate_data_list(self, change): - if change.new is not None: - self.selected_method = ( - pathlib.PurePath(self.root_fpath) / self.selected_subdir / change.new - ) - self.file_list = [ - pathlib.PurePath(f) - for f in os.scandir(self.selected_method) - if not f.is_dir() - ] - self.data_list = [ - file.name - for file in self.file_list - if any(x in file.name for x in self.allowed_extensions) - ] - self.data_selector.options = self.data_list - self.load_metadata() - else: - self.data_selector.options = [] - - def set_data_filename(self, change): - self.selected_data_fname = change.new - self.selected_data_ftype = pathlib.PurePath(self.selected_data_fname).suffix - if "recon" in pathlib.PurePath(self.selected_subdir).name: - self.selected_analysis_type = "recon" - elif "align" in pathlib.PurePath(self.selected_subdir).name: - self.selected_analysis_type = "align" - - def load_metadata(self): - self.metadata_file = [ - self.selected_method / file.name - for file in self.file_list - if "metadata.json" in file.name - ] - 
if self.metadata_file != []: - self.metadata = load_metadata(fullpath=self.metadata_file[0]) - self.options_table = metadata_to_DataFrame(self.metadata) - with self.options_metadata_table_output: - self.options_metadata_table_output.clear_output(wait=True) - display(self.options_table) - - def create_file_browser(self): - fc = VBox([self.fc_label, self.orig_data_fc]) - subdir = VBox([self.subdir_label, self.subdir_selector]) - methods = VBox([self.methods_label, self.methods_selector]) - data = VBox([self.data_label, self.data_selector]) - button = VBox( - [ - Label("Upload", layout=Layout(justify_content="center")), - self.load_data_button, - ] - ) - top_hb = HBox( - [fc, subdir, methods, data, button], - layout=Layout(justify_content="center"), - align_items="stretch", - ) - box = VBox( - [top_hb, self.options_metadata_table_output], - layout=Layout(justify_content="center", align_items="center"), - ) - self.filebrowser = box diff --git a/tomopyui/widgets/prep.py b/tomopyui/widgets/prep.py new file mode 100644 index 0000000..5f9d054 --- /dev/null +++ b/tomopyui/widgets/prep.py @@ -0,0 +1,830 @@ +import numpy as np +import copy +import datetime +import pathlib +import dask.array as da + +from ipywidgets import * +from abc import ABC, abstractmethod +from functools import partial +from tomopyui._sharedvars import * +from tomopyui.backend.io import Projections_Child +from tomopyui.widgets.imports import ShiftsUploader, TwoEnergyUploader +from tomopyui.widgets.view import ( + BqImViewer_Projections_Parent, + BqImViewer_Projections_Child, + BqImViewer_TwoEnergy_High, + BqImViewer_TwoEnergy_Low, +) +from tomopyui.backend.util.padding import * +from tomopyui.backend.io import Metadata_Prep + + +if os.environ["cuda_enabled"] == "True": + from ..tomocupy.prep.alignment import shift_prj_cp, batch_cross_correlation + from ..tomocupy.prep.sampling import shrink_and_pad_projections + +import tomopy.misc.corr as tomocorr + + +class Prep(ABC): + def __init__(self, Import): + 
self.init_attributes(Import) + self.init_widgets() + self.set_observes() + self.make_tab() + + def init_attributes(self, Import): + + self.Import = Import + self.Import.Prep = self + self.projections = Import.projections + self.altered_projections = Projections_Child(self.projections) + self.prep_list = [] + self.metadata = {} + self.accordions_open = False + self.preview_only = False + self.tomocorr_median_filter_size = 3 + self.tomocorr_gaussian_filter_order = 0 + self.tomocorr_gaussian_filter_sigma = 3 + self.save_on = False + self.metadata = Metadata_Prep() + + def init_widgets(self): + """ + Initializes widgets in the Prep tab. + """ + self.header_font_style = { + "font_size": "22px", + "font_weight": "bold", + "font_variant": "small-caps", + } + self.button_font = {"font_size": "22px"} + self.button_layout = Layout(width="45px", height="40px") + + # -- Main viewers -------------------------------------------------------------- + self.imported_viewer = BqImViewer_Projections_Parent() + self.imported_viewer.create_app() + self.altered_viewer = BqImViewer_Projections_Child(self.imported_viewer) + self.altered_viewer.create_app() + self.altered_viewer.ds_viewer_dropdown.options = [("Original", -1)] + self.altered_viewer.ds_viewer_dropdown.value = -1 + + # -- Headers for plotting ------------------------------------- + self.import_plot_header = "Imported Projections" + self.import_plot_header = Label( + self.import_plot_header, style=self.header_font_style + ) + self.altered_plot_header = "Altered Projections" + self.altered_plot_header = Label( + self.altered_plot_header, style=self.header_font_style + ) + + # -- Header for methods ------------------------------------- + self.prep_list_header = "Methods" + self.prep_list_header = Label( + self.prep_list_header, style=self.header_font_style + ) + + # -- Prep List ------------------------------------- + self.prep_list_select = Select( + options=["Method 1", "Method 2", "Method 3", "Method 4", "Method 5"], + 
rows=10, + disabled=True, + ) + # -- Buttons for methods list ------------------------------------- + self.up_button = Button( + disabled=True, + icon="arrow-up", + tooltip="Move method up.", + layout=self.button_layout, + style=self.button_font, + ) + self.down_button = Button( + disabled=True, + icon="arrow-down", + tooltip="Move method down.", + layout=self.button_layout, + style=self.button_font, + ) + self.remove_method_button = Button( + disabled=True, + icon="fa-minus-square", + tooltip="Remove selected method.", + layout=self.button_layout, + style=self.button_font, + ) + self.start_button = Button( + disabled=True, + button_style="info", + tooltip=( + "Run the list above. " + + "This will save a subdirectory with your processed images." + ), + icon="fa-running", + layout=self.button_layout, + style=self.button_font, + ) + self.preview_only_button = Button( + disabled=True, + button_style="", + tooltip=( + "Run the currently selected image through your list of methods. " + + "This will not run the stack or save data." 
+ ), + icon="glasses", + layout=self.button_layout, + style=self.button_font, + ) + self.save_on_button = Button( + disabled=True, + button_style="", + tooltip=("Turn this on to save the data when you click the run button."), + icon="fa-file-export", + layout=self.button_layout, + style=self.button_font, + ) + self.methods_button_box = VBox( + [ + HBox( + [ + self.up_button, + self.down_button, + self.remove_method_button, + ] + ), + HBox( + [ + self.preview_only_button, + self.start_button, + self.save_on_button, + ] + ), + ] + ) + + # -- Main Viewers ------------------------------------------------------------- + + self.viewer_hbox = HBox( + [ + VBox( + [ + self.import_plot_header, + self.imported_viewer.app, + ], + layout=Layout(align_items="center"), + ), + VBox( + [ + self.prep_list_header, + self.prep_list_select, + self.methods_button_box, + ], + layout=Layout(align_items="center", align_content="center"), + ), + VBox( + [ + self.altered_plot_header, + self.altered_viewer.app, + ], + layout=Layout(align_items="center"), + ), + ], + layout=Layout(justify_content="center", align_items="center"), + ) + + # -- Shifts uploader -------------------------------------------------------- + self.shifts_uploader = ShiftsUploader(self) + self.shift_x_header = "Shift in X" + self.shift_x_header = Label(self.shift_x_header, style=self.header_font_style) + self.shifts_sx_select = Select( + options=[], + rows=10, + disabled=True, + ) + self.shift_y_header = "Shift in Y" + self.shift_y_header = Label(self.shift_y_header, style=self.header_font_style) + self.shifts_sy_select = Select( + options=[], + rows=10, + disabled=True, + ) + self.shifts_filechooser_label = "Filechooser" + self.shifts_filechooser_label = Label( + self.shifts_filechooser_label, style=self.header_font_style + ) + + # -- List manipulation --------------------------------------------------------- + self.buttons_to_disable = [ + self.prep_list_select, + self.start_button, + self.up_button, + self.down_button, 
+ self.remove_method_button, + self.start_button, + self.preview_only_button, + self.save_on_button, + ] + + # -- Add preprocessing steps widgets ------------------------------------------- + + # tomopy.misc.corr Median Filter + self.tomocorr_median_filter_button = Button( + description="Median Filter", + button_style="", + tooltip="Add a median filter to your data.", + icon="fa-filter", + layout=Layout(width="auto"), + ) + # tomopy.misc.corr Median Filter options + self.tomocorr_median_filter_size_dd = Dropdown( + description="Size", + options=list((str(i), i) for i in range(1, 25, 2)), + value=3, + ) + + self.tomocorr_median_filter_box = HBox( + [ + self.tomocorr_median_filter_button, + self.tomocorr_median_filter_size_dd, + ] + ) + # tomopy.misc.corr Gaussian Filter + self.tomocorr_gaussian_filter_button = Button( + description="Gaussian Filter", + button_style="", + tooltip="Add a gaussian filter to your data.", + icon="fa-filter", + layout=Layout(width="auto"), + ) + # tomopy.misc.corr Gaussian Filter options + self.tomocorr_gaussian_filter_sigma_tb = BoundedFloatText( + description="σ (stdv)", + value=3, + min=0, + max=25, + ) + self.tomocorr_gaussian_filter_order_dd = Dropdown( + description="Order", + options=[("Zeroth", 0), ("First", 1), ("Second", 2), ("Third", 3)], + value=0, + min=0, + max=25, + ) + + self.tomocorr_gaussian_filter_box = HBox( + [ + self.tomocorr_gaussian_filter_button, + self.tomocorr_gaussian_filter_sigma_tb, + self.tomocorr_gaussian_filter_order_dd, + ], + ) + + self.remove_data_outside_button = Button( + description="Set data = 0 outside of current histogram range.", + layout=Layout(width="auto", height="auto"), + ) + self.remove_data_outside_button.on_click(self.remove_data_outside) + + self.renormalize_by_roi_button = Button( + description="Click to normalize by ROI.", + button_style="info", + layout=Layout(width="auto", height="auto"), + disabled=False, + ) + self.renormalize_by_roi_button.on_click(self.add_ROI_background) + + 
self.roi_buttons_box = VBox( + [self.remove_data_outside_button, self.renormalize_by_roi_button] + ) + self.prep_buttons = [ + self.tomocorr_median_filter_box, + self.tomocorr_gaussian_filter_box, + self.roi_buttons_box, + ] + + # -- Widgets for shifting other energies tool ---------------------------------- + self.high_e_viewer = BqImViewer_TwoEnergy_High() + self.low_e_viewer = BqImViewer_TwoEnergy_Low(self.high_e_viewer) + self.high_e_uploader = TwoEnergyUploader(self.high_e_viewer) + self.low_e_uploader = TwoEnergyUploader(self.low_e_viewer) + self.high_e_header = "Shifted High Energy Projections" + self.high_e_header = Label(self.high_e_header, style=self.header_font_style) + self.low_e_header = "Moving Low Energy Projections" + self.low_e_header = Label(self.low_e_header, style=self.header_font_style) + self.low_e_viewer.scale_button.on_click(self.scale_low_e) + self.num_batches_textbox = IntText(description="Number of batches: ", value=5) + self.two_e_shift_uploaders_hbox = HBox( + [ + VBox( + [ + self.high_e_uploader.quick_path_label, + HBox( + [ + self.high_e_uploader.quick_path_search, + self.high_e_uploader.import_button.button, + ] + ), + self.high_e_uploader.filechooser, + ], + ), + VBox( + [ + self.low_e_uploader.quick_path_label, + HBox( + [ + self.low_e_uploader.quick_path_search, + self.low_e_uploader.import_button.button, + ] + ), + self.low_e_uploader.filechooser, + ], + ), + ], + layout=Layout(justify_content="center"), + ) + + self.two_e_shift_viewer_hbox = HBox( + [ + VBox( + [ + self.high_e_header, + self.high_e_viewer.app, + ], + layout=Layout(align_items="center"), + ), + VBox( + [ + self.low_e_header, + self.low_e_viewer.app, + self.num_batches_textbox, + ], + layout=Layout(align_items="center"), + ), + ], + layout=Layout(justify_content="center", align_items="center"), + ) + + self.two_e_shift_box = VBox( + [self.two_e_shift_uploaders_hbox, self.two_e_shift_viewer_hbox] + ) + + # -- Functions for Energy Scaling/Shifting 
---------------------------- + def scale_low_e(self, *args): + self.low_e_viewer.projections.metadata.set_attributes_from_metadata( + self.low_e_viewer.projections + ) + self.high_e_viewer.projections.metadata.set_attributes_from_metadata( + self.low_e_viewer.projections + ) + low_e = self.low_e_viewer.projections.current_energy_float + high_e = self.high_e_viewer.projections.current_energy_float + num_batches = self.num_batches_textbox.value + high_e_prj = self.high_e_viewer.projections.data + self.low_e_viewer.scale_button.button_style = "info" + self.low_e_viewer.scale_button.icon = "fas fa-cog fa-spin fa-lg" + self.low_e_viewer.projections.data = shrink_and_pad_projections( + self.low_e_viewer.projections.data, high_e_prj, high_e, low_e, num_batches + ) + self.low_e_viewer.plot(self.low_e_viewer.projections) + self.low_e_viewer.start_button.disabled = False + self.low_e_viewer.scale_button.button_style = "success" + self.low_e_viewer.scale_button.icon = "fa-check-square" + self.low_e_viewer.diff_images = np.array( + [x / np.mean(x) for x in self.low_e_viewer.viewer_parent.original_images] + ) - np.array([x / np.mean(x) for x in self.low_e_viewer.original_images]) + self.low_e_viewer._original_images = self.low_e_viewer.original_images + self.low_e_viewer.diff_on = False + self.low_e_viewer._disable_diff_callback = True + self.low_e_viewer.diff_button.disabled = False + self.low_e_viewer._disable_diff_callback = False + + def register_low_e(self, *args): + high_range_x = self.high_e_viewer.px_range_x + high_range_y = self.high_e_viewer.px_range_y + low_range_x = self.low_e_viewer.px_range_x + low_range_y = self.low_e_viewer.px_range_y + low_range_x[1] = int(low_range_x[0] + (high_range_x[1] - high_range_x[0])) + low_range_y[1] = int(low_range_y[0] + (high_range_y[1] - high_range_y[0])) + self.low_e_viewer.start_button.button_style = "info" + self.low_e_viewer.start_button.icon = "fas fa-cog fa-spin fa-lg" + num_batches = self.num_batches_textbox.value + 
upsample_factor = 50 + shift_cpu = [] + low_e_data = self.low_e_viewer.projections.data[ + :, low_range_y[0] : low_range_y[1], low_range_x[0] : low_range_x[1] + ] + high_e_data = self.high_e_viewer.projections.data[ + :, high_range_y[0] : high_range_y[1], high_range_x[0] : high_range_x[1] + ] + + batch_cross_correlation( + low_e_data, + high_e_data, + shift_cpu, + num_batches, + upsample_factor, + blur=False, + subset_correlation=False, + subset_x=None, + subset_y=None, + mask_sim=False, + pad=(0, 0), + progress=None, + ) + shift_cpu = np.concatenate(shift_cpu, axis=1) + sx = shift_cpu[1] + sy = shift_cpu[0] + # TODO: send to GPU and do both calcs there. + self.low_e_viewer.projections.data = shift_prj_cp( + self.low_e_viewer.projections.data, + sx, + sy, + num_batches, + (0, 0), + use_pad_cond=False, + use_corr_prj_gpu=False, + ) + self.low_e_viewer.plot(self.low_e_viewer.projections) + self.low_e_viewer.diff_images = np.array( + [x / np.mean(x) for x in self.low_e_viewer.viewer_parent.original_images] + ) - np.array([x / np.mean(x) for x in self.low_e_viewer.original_images]) + self.low_e_viewer._original_images = self.low_e_viewer.original_images + self.low_e_viewer.diff_on = False + self.low_e_viewer._disable_diff_callback = True + self.low_e_viewer.diff_button.disabled = False + self.low_e_viewer.start_button.button_style = "success" + self.low_e_viewer.start_button.icon = "fa-check-square" + self.low_e_viewer._disable_diff_callback = False + + # -- Functions to add to list ---------------------------------------- + def add_shift(self, *args): + method = PrepMethod( + self, + "Shift", + shift_projections, + [ + list(self.shifts_uploader.sx), + list(self.shifts_uploader.sy), + ], + ) + self.prep_list.append(method.method_tuple) + self.update_prep_list() + + def add_tomocorr_median_filter(self, *args): + self.tomocorr_median_filter_size = self.tomocorr_median_filter_size_dd.value + method = PrepMethod( + self, + "Median Filter", + tomocorr.median_filter, + [ + 
self.tomocorr_median_filter_size, + ], + ) + self.prep_list.append(method.method_tuple) + self.update_prep_list() + + def add_tomocorr_gaussian_filter(self, *args): + self.tomocorr_gaussian_filter_sigma = ( + self.tomocorr_gaussian_filter_sigma_tb.value + ) + self.tomocorr_gaussian_filter_order = ( + self.tomocorr_gaussian_filter_order_dd.value + ) + method = PrepMethod( + self, + "Gaussian Filter", + tomocorr.gaussian_filter, + [ + self.tomocorr_gaussian_filter_sigma, + self.tomocorr_gaussian_filter_order, + ], + ) + self.prep_list.append(method.method_tuple) + self.update_prep_list() + + def add_ROI_background(self, *args): + method = PrepMethod( + self, + "ROI Normalization", + renormalize_by_roi, + [ + self.imported_viewer.px_range_x, + self.imported_viewer.px_range_y, + ], + ) + self.prep_list.append(method.method_tuple) + self.update_prep_list() + + def remove_data_outside(self, *args): + method = PrepMethod( + self, + "Remove Data Outside", + remove_data_outside, + [(self.imported_viewer.hist.vmin, self.imported_viewer.hist.vmax)], + ) + self.prep_list.append(method.method_tuple) + self.update_prep_list() + + def update_prep_list(self): + if self.prep_list == []: + self.prep_list_select.options = [ + "Method 1", + "Method 2", + "Method 3", + "Method 4", + "Method 5", + ] + for x in self.buttons_to_disable: + x.disabled = True + else: + self.prep_list_select.options = [x[0] for x in self.prep_list] + for x in self.buttons_to_disable: + x.disabled = False + + def refresh_plots(self): + self.imported_viewer.plot(self.projections, no_check=True) + self.altered_projections.parent_projections = self.projections + self.altered_projections.copy_from_parent() + self.altered_viewer.plot(self.projections, no_check=True) + + def set_observes(self): + + # Start button + self.start_button.on_click(self.run_prep_list) + + # Shifts - find upload callback in Imports + + # tomopy.misc.corr Median Filter + 
self.tomocorr_median_filter_button.on_click(self.add_tomocorr_median_filter) + + # tomopy.misc.corr Gaussian Filter + self.tomocorr_gaussian_filter_button.on_click(self.add_tomocorr_gaussian_filter) + + # Remove method + self.remove_method_button.on_click(self.remove_method) + + # Move method up + self.up_button.on_click(self.move_method_up) + + # Move method down + self.down_button.on_click(self.move_method_down) + + # Preview + self.preview_only_button.on_click(self.preview_only_on_off) + + # Save + self.save_on_button.on_click(self.save_on_off) + + # Registration + self.low_e_viewer.start_button.on_click(self.register_low_e) + + def update_shifts_list(self): + pass + + def save_on_off(self, *args): + if self.save_on: + self.save_on_button.button_style = "" + self.save_on = False + else: + self.save_on_button.button_style = "success" + self.save_on = True + + def preview_only_on_off(self, *args): + if self.preview_only: + self.preview_only_button.button_style = "" + self.preview_only = False + else: + self.preview_only_button.button_style = "success" + self.preview_only = True + + def remove_method(self, *args): + ind = self.prep_list_select.index + self.prep_list.pop(ind) + self.update_prep_list() + + def move_method_up(self, *args): + ind = self.prep_list_select.index + if ind != 0: + self.prep_list[ind], self.prep_list[ind - 1] = ( + self.prep_list[ind - 1], + self.prep_list[ind], + ) + self.update_prep_list() + self.prep_list_select.index = ind - 1 + + def move_method_down(self, *args): + ind = self.prep_list_select.index + if ind != len(self.prep_list) - 1: + self.prep_list[ind], self.prep_list[ind + 1] = ( + self.prep_list[ind + 1], + self.prep_list[ind], + ) + self.update_prep_list() + self.prep_list_select.index = ind + 1 + + # -- Load metadata button --------------------------------------------- + def _load_metadata_all_on_click(self, change): + self.load_metadata_button.button_style = "info" + self.load_metadata_button.icon = "fas fa-cog fa-spin fa-lg" 
+ self.load_metadata_button.description = "Importing metadata." + self.load_metadata_button.button_style = "success" + self.load_metadata_button.icon = "fa-check-square" + self.load_metadata_button.description = "Finished importing metadata." + + # -- Button to start alignment ---------------------------------------- + def run_prep_list(self, change): + change.button_style = "info" + change.icon = "fas fa-cog fa-spin fa-lg" + self.run() + change.button_style = "success" + change.icon = "fa-check-square" + + def run(self): + if self.preview_only: + image_index = self.imported_viewer.image_index_slider.value + self.altered_viewer.image_index_slider.value = image_index + self.prepped_data = copy.deepcopy(self.altered_viewer.plotted_image.image) + self.prepped_data = self.prepped_data[np.newaxis, ...] + for prep_method_tuple in self.prep_list: + prep_method_tuple[1].update_method_and_run() + self.altered_viewer.plotted_image.image = self.prepped_data + else: + self.altered_projections.parent_projections = ( + self.imported_viewer.projections + ) + self.altered_projections.deepcopy_data_from_parent() + self.prepped_data = self.altered_projections.data + for num, prep_method_tuple in enumerate(self.prep_list): + prep_method_tuple[1].update_method_and_run() + self.prep_list_select.index = num + self.altered_projections.data = self.prepped_data + self.altered_viewer.images = self.altered_projections.data + self.altered_viewer.plotted_image.image = self.altered_projections.data[0] + if self.save_on: + self.make_prep_dir() + self.metadata.set_metadata(self) + self.metadata.filedir = self.filedir + self.metadata.save_metadata() + self.altered_projections.data = da.from_array( + self.altered_projections.data + ) + hist, r, bins, percentile = self.altered_projections._dask_hist() + grp = self.altered_projections.hdf_key_norm + "/" + data_dict = { + self.altered_projections.hdf_key_norm_proj: self.altered_projections.data, + grp + self.altered_projections.hdf_key_bin_frequency: 
hist[0], + grp + self.altered_projections.hdf_key_bin_edges: hist[1], + grp + self.altered_projections.hdf_key_image_range: r, + grp + self.altered_projections.hdf_key_percentile: percentile, + } + self.altered_projections.dask_data_to_h5( + data_dict, savedir=self.filedir + ) + self.altered_projections._dask_bin_centers( + grp, write=True, savedir=self.filedir + ) + + def make_prep_dir(self): + now = datetime.datetime.now() + dt_string = now.strftime("%Y%m%d-%H%M%S-prep") + self.filedir = pathlib.Path(self.Import.projections.filedir) / dt_string + self.filedir.mkdir() + # os.mkdir(self.filedir) + + def make_tab(self): + + # -- Box organization ------------------------------------------------- + + self.top_of_box_hb = HBox( + [self.Import.switch_data_buttons], + layout=Layout( + width="auto", + justify_content="flex-start", + ), + ) + self.viewer_accordion = Accordion( + children=[self.viewer_hbox], + selected_index=None, + titles=("Plot Projection Images",), + ) + self.prep_buttons_hbox = VBox( + self.prep_buttons, + layout=Layout(justify_content="center"), + ) + self.prep_buttons_accordion = Accordion( + children=[self.prep_buttons_hbox], + selected_index=None, + titles=("Add Preprocessing Methods",), + ) + self.two_e_shift_accordion = Accordion( + children=[self.two_e_shift_box], + selected_index=None, + titles=("Tool: shift projections.",), + ) + + self.shifts_box = HBox( + [ + VBox( + [ + self.shifts_uploader.quick_path_label, + self.shifts_uploader.quick_path_search, + ], + ), + VBox( + [ + self.shifts_filechooser_label, + self.shifts_uploader.filechooser, + ], + ), + VBox( + [ + self.shift_x_header, + self.shifts_sx_select, + ], + ), + VBox( + [ + self.shift_y_header, + self.shifts_sy_select, + ], + ), + self.shifts_uploader.import_button.button, + ], + layout=Layout(justify_content="center"), + ) + + self.shifts_accordion = Accordion( + children=[self.shifts_box], + selected_index=None, + titles=("Upload shifts from prior alignments",), + ) + self.tab = 
VBox( + children=[ + self.top_of_box_hb, + self.viewer_accordion, + self.prep_buttons_accordion, + self.shifts_accordion, + self.two_e_shift_accordion, + ] + ) + + +class PrepMethod: + def __init__(self, Prep, method_name: str, func, opts: list): + self.Prep = Prep + self.method_name = method_name + self.func = func + self.opts = opts + self.method_tuple = (self.method_name, self) + + def update_method_and_run(self): + self.partial_func = partial(self.func, self.Prep.prepped_data, *self.opts) + self.Prep.prepped_data = self.partial_func() + + +def shift_projections(projections, sx, sy): + new_prj_imgs = copy.deepcopy(projections) + pad_x = int(np.ceil(np.max(np.abs(sx)))) + pad_y = int(np.ceil(np.max(np.abs(sy)))) + pad = (pad_x, pad_y) + new_prj_imgs = pad_projections(new_prj_imgs, pad) + new_prj_imgs = shift_prj_cp( + new_prj_imgs, + sx, + sy, + 5, + pad, + use_corr_prj_gpu=False, + ) + return new_prj_imgs + + +def renormalize_by_roi(projections, px_range_x, px_range_y): + exp_full = np.exp(-projections) + averages = [ + np.mean( + exp_full[i, px_range_y[0] : px_range_y[1], px_range_x[0] : px_range_x[1]] + ) + for i in range(len(exp_full)) + ] + projections = [exp_full[i] / averages[i] for i in range(len(exp_full))] + projections = -np.log(projections) + return projections + + +def remove_data_outside(projections, vmin_vmax: tuple): + remove_high_indexes = projections > vmin_vmax[1] + projections[remove_high_indexes] = 1e-6 + remove_low_indexes = projections < vmin_vmax[0] + projections[remove_low_indexes] = 1e-6 + return projections + + +### May use? 
+# def rectangle_selector_on(self, change): +# time.sleep(0.1) +# if self.viewer.rectangle_selector_on: +# self.renormalize_by_roi_button.disabled = False +# else: +# self.renormalize_by_roi_button.disabled = True diff --git a/tomopyui/widgets/view.py b/tomopyui/widgets/view.py new file mode 100644 index 0000000..def9558 --- /dev/null +++ b/tomopyui/widgets/view.py @@ -0,0 +1,1278 @@ +import bqplot as bq +import numpy as np +import copy +import pathlib +import matplotlib.pyplot as plt +import matplotlib.animation as animation +import time + +from abc import ABC, abstractmethod +from bqplot_image_gl import ImageGL +from ipywidgets import * +from skimage.transform import rescale # look for better option +from tomopyui._sharedvars import * +from bqplot_image_gl.interacts import MouseInteraction, keyboard_events, mouse_events +from bqplot import PanZoom + + +class BqImViewerBase(ABC): + lin_schemes = [ + "viridis", + "plasma", + "inferno", + "magma", + "OrRd", + "PuBu", + "BuPu", + "Oranges", + "BuGn", + "YlOrBr", + "YlGn", + "Reds", + "RdPu", + "Greens", + "YlGnBu", + "Purples", + "GnBu", + "Greys", + "YlOrRd", + "PuRd", + "Blues", + "PuBuGn", + ] + + def __init__(self, images=None): + if images is None: + self.original_images = np.random.rand(5, 50, 50) + self.images = self.original_images + else: + self.original_images = images + self.images = images + self.current_image_ind = 0 + self.pxX = self.images.shape[2] + self.pxY = self.images.shape[1] + self.px_range_x = [0, self.pxX - 1] + self.px_range_y = [0, self.pxY - 1] + self.px_range = [self.px_range_x, self.px_range_y] + self.px_size = 1 + self.aspect_ratio = self.pxX / self.pxY + self.fig = None + self.ds_factor = 0 + self.dimensions = ("550px", "550px") + self.current_plot_axis = 0 + self.rectangle_selector_on = False + self.current_interval = 300 + self.from_hdf = False + self.from_npy = False + self._init_fig() + self._init_widgets() + self._init_hist() + self._init_observes() + self._init_links() + 
self._init_app() + + def _init_fig(self): + self.scale_x = bq.LinearScale(min=0, max=1) + self.scale_y = bq.LinearScale(min=1, max=0) + self.scale_x_y = {"x": self.scale_x, "y": self.scale_y} + self.fig = bq.Figure( + scales=self.scale_x_y, + fig_margin=dict(top=0, bottom=0, left=0, right=0), + padding_y=0, + padding_x=0, + ) + self.fig.layout.fig_margin = dict(top=0, bottom=0, left=0, right=0) + self.image_scale = { + "x": self.scale_x, + "y": self.scale_y, + "image": bq.ColorScale( + min=np.min(self.images), + max=np.max(self.images), + scheme="viridis", + ), + } + self.plotted_image = ImageGL( + image=self.images[self.current_image_ind], + scales=self.image_scale, + ) + self.fig.marks = (self.plotted_image,) + self.fig.layout.width = self.dimensions[0] + self.fig.layout.height = self.dimensions[1] + self.panzoom = PanZoom(scales={"x": [self.scale_x], "y": [self.scale_y]}) + self.msg_interaction = MouseInteraction( + x_scale=self.scale_x, + y_scale=self.scale_y, + move_throttle=70, + next=self.panzoom, + events=keyboard_events + mouse_events, + ) + self.fig.interaction = self.msg_interaction + + def _init_widgets(self): + + # Styles and layouts + self.button_font = {"font_size": "22px"} + self.button_layout = Layout(width="45px", height="40px") + + # Image index slider + self.image_index_slider = IntSlider( + value=0, + min=0, + max=self.images.shape[0] - 1, + step=1, + ) + + # Image index play button + self.play = Play( + value=0, + min=0, + max=self.images.shape[0] - 1, + step=1, + interval=self.current_interval, + disabled=False, + ) + + # Go faster on play button + self.plus_button = Button( + icon="plus", + layout=self.button_layout, + style=self.button_font, + tooltip="Speed up play slider.", + ) + + # Go slower on play button + self.minus_button = Button( + icon="minus", + layout=self.button_layout, + style=self.button_font, + tooltip="Slow down play slider slider.", + ) + + # Color scheme dropdown menu + self.scheme_dropdown = Dropdown( + 
description="Scheme:", options=self.lin_schemes, value="viridis" + ) + + # Swap axes button + self.swap_axes_button = Button( + icon="random", + layout=self.button_layout, + style=self.button_font, + tooltip="Swap axes", + ) + + # Remove high/low intensities button + self.rm_high_low_int_button = Button( + icon="adjust", + layout=self.button_layout, + style=self.button_font, + tooltip="Remove high and low intensities from view.", + ) + + # Save movie button + self.save_movie_button = Button( + icon="file-video", + layout=self.button_layout, + style=self.button_font, + tooltip="Save a movie of these images.", + ) + + # Downsample dropdown menu + self.ds_viewer_dropdown = Dropdown( + description="Viewer binning: ", + options=[("Original", -1), ("2", 0), ("4", 1), ("8", 2)], + value=0, + style=extend_description_style, + ) + + # Reset button + self.reset_button = Button( + icon="redo", + layout=self.button_layout, + style=self.button_font, + tooltip="Reset to original view.", + ) + + self.rectangle_selector_button = Button( + icon="far square", + layout=self.button_layout, + style=self.button_font, + tooltip="Select a region of interest.", + ) + # Rectangle selector + self.rectangle_selector = bq.interacts.BrushSelector( + x_scale=self.scale_x, + y_scale=self.scale_y, + ) + + # Status bar updates + self.status_bar_xrange = Label() + self.status_bar_yrange = Label() + self.status_bar_xrange.value = "" + self.status_bar_yrange.value = "" + self.status_bar_xdistance = Label() + self.status_bar_ydistance = Label() + self.status_bar_xdistance.value = "" + self.status_bar_ydistance.value = "" + self.status_bar_intensity = Label() + + def _init_hist(self): + # Histogram + self.hist = BqImHist(self) + + def _init_observes(self): + + # Image index slider + self.image_index_slider.observe(self.change_image, names="value") + + # Color scheme dropdown menu + self.scheme_dropdown.observe(self.update_scheme, names="value") + + # Swap axes button + 
self.swap_axes_button.on_click(self.swap_axes) + + # Faster play interval + self.plus_button.on_click(self.speed_up) + + # Slower play interval + self.minus_button.on_click(self.slow_down) + + # Save a movie at the current vmin/vmax + self.save_movie_button.on_click(self.save_movie) + + # Downsample dropdown menu + self.ds_viewer_dropdown.observe(self.downsample_viewer, "value") + + # Reset button + self.reset_button.on_click(self.reset) + + # Zoom/intensity + self.msg_interaction.on_msg(self.on_mouse_msg_intensity) + + # Rectangle selector + self.rectangle_selector.observe(self.rectangle_to_px_range, "selected") + + # Rectangle selector button + self.rectangle_selector_button.on_click(self.rectangle_select) + + def _init_links(self): + + # Image index slider and play button + jslink((self.play, "value"), (self.image_index_slider, "value")) + jslink((self.play, "min"), (self.image_index_slider, "min")) + jslink((self.play, "max"), (self.image_index_slider, "max")) + + # -- Callback functions ------------------------------------------------------------ + + # Image index + def change_image(self, change): + self.plotted_image.image = self.images[change.new] + self.current_image_ind = change.new + + # Scheme + def update_scheme(self, *args): + self.image_scale["image"].scheme = self.scheme_dropdown.value + + # Swap axes + def swap_axes(self, *args): + self.images = np.swapaxes(self.images, 0, 1) + self.change_aspect_ratio() + self.image_index_slider.max = self.images.shape[0] - 1 + self.image_index_slider.value = 0 + self.plotted_image.image = self.images[self.image_index_slider.value] + if self.current_plot_axis == 0: + self.current_plot_axis = 1 + else: + self.current_plot_axis = 0 + + # Downsample the plot view + def downsample_viewer(self, *args): + self.ds_factor = self.ds_viewer_dropdown.value + if self.from_hdf and self.ds_factor != -1: + self.projections._load_hdf_ds_data_into_memory(pyramid_level=self.ds_factor) + self.original_images = self.projections.data + 
self.images = self.projections.data_ds + self.hist.precomputed_hist = self.projections.hist + elif self.from_hdf and self.ds_factor == -1: + self.projections._load_hdf_normalized_data_into_memory() + self.original_images = self.projections.data + self.images = self.projections.data + self.hist.precomputed_hist = self.projections.hist + else: + if self.ds_factor == -1: + self.plotted_image.image = self.original_images[0] + self.images = self.original_images + self.change_aspect_ratio() + else: + self.images = copy.deepcopy(self.original_images) + ds_num = np.power(2, int(self.ds_factor) + 1) + ds_num = float(1 / ds_num) + self.images = rescale( + self.images, + (1, ds_num, ds_num), + anti_aliasing=False, + ) + self.plotted_image.image = self.images[self.image_index_slider.value] + self.change_aspect_ratio() + + # Reset + def reset(self, *args): + if self.current_plot_axis == 1: + self.swap_axes() + self.current_image_ind = 0 + self.change_aspect_ratio() + self.plotted_image.image = self.images[0] + self.hist.reset_state() + self.image_scale["image"].min = self.hist.vmin + self.image_scale["image"].max = self.hist.vmax + self.image_index_slider.max = self.images.shape[0] - 1 + self.image_index_slider.value = 0 + + # Speed up playback of play button + def speed_up(self, *args): + self.current_interval -= 50 + self.play.interval = self.current_interval + + # Slow down playback of play button + def slow_down(self, *args): + self.current_interval += 50 + self.play.interval = self.current_interval + + # Save movie + def save_movie(self, *args): + self.save_movie_button.icon = "fas fa-cog fa-spin fa-lg" + self.save_movie_button.button_style = "info" + fig, ax = plt.subplots(figsize=(10, 5)) + _ = ax.set_axis_off() + _ = fig.patch.set_facecolor("black") + ims = [] + vmin = self.image_scale["image"].min + vmax = self.image_scale["image"].max + if self.from_hdf: + self.projections._load_hdf_ds_data_into_memory( + self.ds_viewer_dropdown.value + ) + self.images = 
self.projections.data_ds + for image in self.images: + im = ax.imshow(image, animated=True, vmin=vmin, vmax=vmax) + ims.append([im]) + ani = animation.ArtistAnimation( + fig, ims, interval=300, blit=True, repeat_delay=1000 + ) + writer = animation.FFMpegWriter( + fps=20, codec=None, bitrate=1000, extra_args=None, metadata=None + ) + _ = ani.save(str(pathlib.Path(self.filedir / "movie.mp4")), writer=writer) + self.save_movie_button.icon = "file-video" + self.save_movie_button.button_style = "success" + + # Rectangle selector button + def rectangle_select(self, change): + if not self.rectangle_selector_on: + self.fig.interaction = self.rectangle_selector + self.fig.interaction.color = "red" + self.rectangle_selector_on = True + self.rectangle_selector_button.button_style = "success" + else: + self.rectangle_selector_button.button_style = "" + self.rectangle_selector_on = False + self.status_bar_xrange.value = "" + self.status_bar_yrange.value = "" + self.status_bar_xdistance.value = "" + self.status_bar_ydistance.value = "" + self.fig.interaction = self.msg_interaction + + # Rectangle selector to update projection range + def rectangle_to_px_range(self, *args): + self.px_range = self.rectangle_selector.selected + self.px_range = np.where(self.px_range < 0, 0, self.px_range) + self.px_range = np.where(self.px_range > 1, 1, self.px_range) + self.px_range_x = [ + int(x) for x in np.around(self.px_range[:, 0] * (self.pxX - 1)) + ] + self.px_range_y = np.around(self.px_range[:, 1] * (self.pxY - 1)) + self.px_range_y = [int(x) for x in self.px_range_y] + if self.px_size is not None: + self.nm_x = int((self.px_range_x[1] - self.px_range_x[0]) * self.px_size) + self.nm_y = int((self.px_range_y[1] - self.px_range_y[0]) * self.px_size) + self.micron_x = round(self.nm_x / 1000, 2) + self.micron_y = round(self.nm_y / 1000, 2) + self.update_px_range_status_bar() + + def update_px_range_status_bar(self): + self.status_bar_xrange.value = f"X Pixel Range: {self.px_range_x} | " + 
self.status_bar_yrange.value = f"Y Pixel Range: {self.px_range_y}" + if self.nm_x < 1000: + self.status_bar_xdistance.value = f"X Distance (nm): {self.nm_x} | " + else: + self.status_bar_xdistance.value = f"X Distance (μm): {self.micron_x} | " + if self.nm_y < 1000: + self.status_bar_ydistance.value = f"Y Distance (nm): {self.nm_y}" + else: + self.status_bar_ydistance.value = f"Y Distance (μm): {self.micron_y}" + + # Intensity message + def on_mouse_msg_intensity(self, interaction, data, buffers): + if data["event"] == "mousemove": + domain_x = data["domain"]["x"] + domain_y = data["domain"]["y"] + normalized_x = (domain_x - self.plotted_image.x[0]) / ( + self.plotted_image.x[1] - self.plotted_image.x[0] + ) + normalized_y = (domain_y - self.plotted_image.y[0]) / ( + self.plotted_image.y[1] - self.plotted_image.y[0] + ) + pixel_x = int(np.floor(normalized_x * self.plotted_image.image.shape[1])) + pixel_y = int(np.floor(normalized_y * self.plotted_image.image.shape[0])) + if ( + pixel_x >= 0 + and pixel_x < self.plotted_image.image.shape[1] + and pixel_y >= 0 + and pixel_y < self.plotted_image.image.shape[0] + ): + value = str(round(self.plotted_image.image[pixel_y, pixel_x], 5)) + else: + value = "Out of range" + msg = f"Intensity={value}" + self.status_bar_intensity.value = msg + elif data["event"] == "mouseleave": + self.status_bar_intensity.value = "" + elif data["event"] == "mouseenter": + self.status_bar_intensity.value = "Almost there..." 
# this is is not visible because mousemove overwrites the msg + else: + pass + # self.status_bar_intensity.value = f"click {data}" + + # -- Other methods ----------------------------------------------------------------- + def change_aspect_ratio(self): + pxX = self.images.shape[2] + pxY = self.images.shape[1] + self.aspect_ratio = pxX / pxY + if self.aspect_ratio >= 1.5: + self.aspect_ratio = 1.5 + if self.aspect_ratio <= 0.5: + self.aspect_ratio = 0.5 + if self.aspect_ratio >= 1: + if self.aspect_ratio > self.fig.max_aspect_ratio: + self.fig.max_aspect_ratio = self.aspect_ratio + self.fig.min_aspect_ratio = self.aspect_ratio + else: + self.fig.min_aspect_ratio = self.aspect_ratio + self.fig.max_aspect_ratio = self.aspect_ratio + # This is set to default dimensions of 550, not great: + self.fig.layout.height = str(int(550 / self.aspect_ratio)) + "px" + + def check_npy_or_hdf(self, projections): + if projections.hdf_file is not None: + self.from_hdf = True + self.from_npy = False + else: + self.from_hdf = False + self.from_npy = True + + def set_state_on_plot(self): + self.pxX = self.original_images.shape[2] + self.pxY = self.original_images.shape[1] + self.pxZ = self.original_images.shape[0] + self.px_range_x = [0, self.pxX - 1] + self.px_range_y = [0, self.pxY - 1] + self.px_range = [self.px_range_x, self.px_range_y] + self.plotted_image.image = self.images[0] + self.image_index_slider.max = self.pxZ - 1 + self.image_index_slider.value = 0 + self.current_image_ind = 0 + self.change_aspect_ratio() + self.hist.refresh_histogram() + + def get_ds_factor_from_dropdown(self): + ds_factor = self.ds_viewer_dropdown.value + if ds_factor == -1: + ds_factor = 1 + else: + ds_factor = np.power(2, int(ds_factor) + 1) + ds_factor = 1 / ds_factor + return ds_factor + + def _init_app(self): + self.header_layout = Layout(justify_content="center", align_items="center") + self.header = HBox( + [ + self.ds_viewer_dropdown, + self.scheme_dropdown, + ], + layout=self.header_layout, + ) 
+ self.center_layout = Layout(justify_content="center", align_content="center") + self.center = HBox([self.fig, self.hist.fig], layout=self.center_layout) + self.footer_layout = Layout(justify_content="center") + self.footer1 = HBox( + [self.play, self.image_index_slider], layout=self.footer_layout + ) + self.init_buttons = [ + self.plus_button, + self.minus_button, + self.reset_button, + self.rm_high_low_int_button, + self.swap_axes_button, + self.rectangle_selector_button, + self.save_movie_button, + ] + self.all_buttons = self.init_buttons + + @abstractmethod + def plot(self, io_obj): + ... + + @abstractmethod + def create_app(self): + ... + + +class BqImViewer_Projections_Parent(BqImViewerBase): + def __init__(self): + super().__init__() + self.rectangle_selector_button.tooltip = ( + "Turn on the rectangular region selector. Select a region " + "and copy it over to Altered Projections." + ) + + def create_app(self): + self.button_box = HBox( + self.init_buttons, + layout=self.footer_layout, + ) + + footer2 = VBox( + [ + self.button_box, + HBox( + [ + self.status_bar_xrange, + self.status_bar_yrange, + self.status_bar_intensity, + ], + layout=self.footer_layout, + ), + HBox( + [ + self.status_bar_xdistance, + self.status_bar_ydistance, + ], + layout=self.footer_layout, + ), + ], + layout=self.footer_layout, + ) + footer = VBox([self.footer1, footer2]) + self.app = VBox([self.header, self.center, footer]) + + def plot(self, projections, ds=True, no_check=False): + self.projections = projections + self.filedir = self.projections.filedir + self.px_size = self.projections.px_size + if ds and not no_check: + self.projections._check_downsampled_data() + self.ds_viewer_dropdown.value = ( + 0 if any([0 == x[1] for x in self.ds_viewer_dropdown.options]) else -1 + ) + self.hist.precomputed_hist = self.projections.hist + self.original_images = self.projections.data + self.images = self.projections.data_ds + elif ds and no_check: + self.hist.precomputed_hist = 
self.projections.hist + self.original_images = self.projections.data + self.images = self.projections.data_ds + else: + self.ds_viewer_dropdown.value = -1 + self.ds_viewer_dropdown.options = [("Original", -1)] + self.original_images = self.projections.data + self.images = self.original_images + self.check_npy_or_hdf(projections) + self.set_state_on_plot() + + +class BqImViewer_Projections_Child(BqImViewer_Projections_Parent): + def __init__(self, viewer_parent): + self.viewer_parent = viewer_parent + super().__init__() + self.subset_x = None + self.subset_y = None + # Copy from plotter + self.copy_button = Button( + icon="file-import", + layout=self.button_layout, + style=self.button_font, + tooltip="Copy data from 'Imported Projections'.", + ) + self.copy_button.on_click(self.copy_parent_projections) + + self.link_plotted_projections_button = Button( + icon="unlink", + layout=self.button_layout, + style=self.button_font, + disabled=True, + tooltip="Link the sliders together.", + ) + self.link_plotted_projections_button.on_click(self.link_plotted_projections) + self.plots_linked = False + + self.range_from_parent_button = Button( + icon="object-ungroup", + layout=self.button_layout, + style=self.button_font, + disabled=True, + tooltip="Get range from 'Imported Projections'.", + ) + self.range_from_parent_button.on_click(self.range_from_parent) + + # Rectangle selector + self.rectangle_selector.observe(self.rectangle_to_px_range, "selected") + self.all_buttons.insert(-2, self.copy_button) + self.all_buttons.insert(-2, self.link_plotted_projections_button) + self.all_buttons.insert(-2, self.range_from_parent_button) + + def _init_hist(self): + self.hist = BqImHist_Child(self, self.viewer_parent) + + def create_app(self): + self.center = HBox([self.fig, self.hist.fig], layout=self.center_layout) + self.button_box = HBox( + self.all_buttons, + layout=self.footer_layout, + ) + footer2 = VBox( + [ + self.button_box, + HBox( + [ + self.status_bar_xrange, + 
self.status_bar_yrange, + self.status_bar_intensity, + ], + layout=Layout(justify_content="center"), + ), + ] + ) + + footer = VBox([self.footer1, footer2]) + + self.app = VBox([self.header, self.center, footer]) + + def plot(self, projections, ds=True, no_check=True): + super().plot(projections, ds=ds, no_check=no_check) + self.rm_high_low_int_button.disabled = False + self.subset_px_range_x = self.px_range_x + self.subset_px_range_y = self.px_range_y + + def copy_parent_projections(self, no_check=True, *args, **kwargs): + tic = time.perf_counter() + self.copying = True + self.projections.parent_projections = self.viewer_parent.projections + self.projections.copy_from_parent() + self.hist.copy_parent_hist() + self.plot(self.projections, ds=True, no_check=no_check) + self.hist.refresh_histogram() + self.link_plotted_projections_button.button_style = "info" + self.link_plotted_projections_button.disabled = False + self.range_from_parent_button.disabled = False + self.copying = False + + def link_plotted_projections(self, *args): + if not self.plots_linked: + self.plots_linked = True + self.plot_link = jsdlink( + (self.viewer_parent.image_index_slider, "value"), + (self.image_index_slider, "value"), + ) + self.link_plotted_projections_button.button_style = "success" + self.link_plotted_projections_button.icon = "link" + else: + self.plots_linked = False + self.plot_link.unlink() + self.link_plotted_projections_button.button_style = "info" + self.link_plotted_projections_button.icon = "unlink" + + def range_from_parent(self, *args): + if ( + self.viewer_parent.rectangle_selector_button.button_style == "success" + and self.viewer_parent.rectangle_selector.selected is not None + ): + ds_factor = self.viewer_parent.get_ds_factor_from_dropdown() + imtemp = self.viewer_parent.images + lowerY = int(self.viewer_parent.px_range_y[0] * ds_factor) + upperY = int(self.viewer_parent.px_range_y[1] * ds_factor) + lowerX = int(self.viewer_parent.px_range_x[0] * ds_factor) + upperX = 
int(self.viewer_parent.px_range_x[1] * ds_factor) + self.images = copy.deepcopy(imtemp[:, lowerY:upperY, lowerX:upperX]) + self.change_aspect_ratio() + self.plotted_image.image = self.images[self.viewer_parent.current_image_ind] + # This is confusing - decide on better names. The actual dimensions are + # stored in self.projections.px_range_x, but this will eventually set the + # Analysis attributes for px_range_x, px_range_y to input into + # algorithms + self.px_range_x = ( + self.viewer_parent.px_range_x[0], + self.viewer_parent.px_range_x[1], + ) + self.px_range_y = ( + self.viewer_parent.px_range_y[0], + self.viewer_parent.px_range_y[1], + ) + + # Rectangle selector to update projection range + def rectangle_to_px_range(self, *args): + self.px_range = self.rectangle_selector.selected + x_len = self.px_range_x[1] - self.px_range_x[0] + y_len = self.px_range_y[1] - self.px_range_y[0] + lowerX = int(self.px_range[0, 0] * x_len + self.px_range_x[0]) + upperX = int(self.px_range[1, 0] * x_len + self.px_range_x[0]) + lowerY = int(self.px_range[0, 1] * y_len + self.px_range_y[0]) + upperY = int(self.px_range[1, 1] * y_len + self.px_range_y[0]) + self.printed_range_x = [lowerX, upperX] + self.printed_range_y = [lowerY, upperY] + self.subset_x = [x - self.px_range_x[0] for x in self.printed_range_x] + self.subset_y = [y - self.px_range_y[0] for y in self.printed_range_y] + self.update_px_range_status_bar() + + # Rectangle selector button + def rectangle_select(self, change): + if self.rectangle_selector_on is False: + self.fig.interaction = self.rectangle_selector + self.fig.interaction.color = "magenta" + self.rectangle_selector_on = True + self.rectangle_selector_button.button_style = "success" + # self.Analysis.use_subset_correlation_checkbox.value = True + else: + self.rectangle_selector_button.button_style = "" + self.rectangle_selector_on = False + self.status_bar_xrange.value = "" + self.status_bar_yrange.value = "" + self.fig.interaction = self.msg_interaction 
+ # self.Analysis.use_subset_correlation_checkbox.value = False + + def update_px_range_status_bar(self): + self.status_bar_xrange.value = ( + f"Phase Correlation X Pixel Range: {self.printed_range_x} | " + ) + self.status_bar_yrange.value = ( + f"Phase Correlation Y Pixel Range: {self.printed_range_y}" + ) + + +class BqImViewer_Center(BqImViewer_Projections_Parent): + def __init__(self): + super().__init__() + self.center_line_on = True + self.center_line = bq.Lines( + x=[0.5, 0.5], + y=[0, 1], + colors="red", + stroke_width=3, + scales={"x": self.scale_x, "y": self.scale_y}, + ) + self.slice_line = bq.Lines( + x=[0, 1], + y=[0.5, 0.5], + colors="green", + stroke_width=3, + scales={"x": self.scale_x, "y": self.scale_y}, + ) + self.slice_line_slider = IntSlider( + value=0, + min=0, + max=self.images.shape[0] - 1, + step=1, + orientation="vertical", + readout=False, + layout=Layout(height="auto"), + ) + self.tilted_center_line = bq.Lines( + x=[0.5, 0.5], + y=[0, 1], + colors="orangered", + line_style="dashed", + stroke_width=3, + scales={"x": self.scale_x, "y": self.scale_y}, + ) + self.scatter = bq.Scatter( + scales={"x": self.scale_x, "y": self.scale_y}, + colors=["orangered"], + stroke="black", + default_size=128, + ) + self.fig.marks = ( + self.plotted_image, + self.center_line, + self.slice_line, + ) + + def create_app(self): + self.button_box = HBox( + self.all_buttons, + layout=self.footer_layout, + ) + self.footer2 = VBox( + [ + self.button_box, + HBox( + [ + self.status_bar_xrange, + self.status_bar_yrange, + self.status_bar_intensity, + ], + layout=self.footer_layout, + ), + HBox( + [ + self.status_bar_xdistance, + self.status_bar_ydistance, + ], + layout=self.footer_layout, + ), + ], + layout=self.footer_layout, + ) + self.footer = VBox([self.footer1, self.footer2]) + self.center = HBox( + [self.slice_line_slider, self.fig, self.hist.fig], layout=self.center_layout + ) + self.app = VBox([self.header, self.center, self.footer]) + + def plot(self, 
projections, no_check=True): + super().plot(projections, no_check=no_check) + self.slice_line_slider.max = self.pxY - 1 + self.slice_line_slider.value = int((self.pxY - 1) / 2) + + def update_center_line(self, Center, slider_idx): + self.center_line.x = [ + Center.cen_range[slider_idx] / self.pxX, + Center.cen_range[slider_idx] / self.pxX, + ] + + def update_tilted_center_line(self, Center): + if Center.reg is None and len(Center.center_slice_list) == 0: + # self.scatter.x = None + # self.scatter.y = None + self.fig.marks = ( + self.plotted_image, + self.center_line, + self.slice_line, + ) + self.fig.marks = ( + self.plotted_image, + self.center_line, + self.slice_line, + ) + else: + x_vals = [cen[0] / self.pxX for cen in Center.center_slice_list] + y_vals = [cen[1] / self.pxY for cen in Center.center_slice_list] + self.scatter.x = x_vals + self.scatter.y = y_vals + self.tilted_center_line.x = [ + Center.reg_centers[0] / self.pxX, + Center.reg_centers[-1] / self.pxX, + ] + if Center.reg is None and len(Center.center_slice_list) == 1: + self.fig.marks = ( + self.plotted_image, + self.center_line, + self.slice_line, + self.scatter, + ) + else: + self.fig.marks = ( + self.plotted_image, + self.center_line, + self.slice_line, + self.tilted_center_line, + self.scatter, + ) + + + +class BqImViewer_Center_Recon(BqImViewer_Projections_Parent): + # TODO: make reconstruction io_object + def plot(self, rec): + self.pxX = rec.shape[2] + self.pxY = rec.shape[1] + self.original_images = rec + self.images = rec + self.ds_viewer_dropdown.value = 0 + self.ds_factor = self.ds_viewer_dropdown.value + self.current_image_ind = 0 + self.change_aspect_ratio() + self.image_index_slider.max = self.images.shape[0] - 1 + self.image_index_slider.value = int(self.images.shape[0] / 2) + self.plotted_image.image = self.images[self.image_index_slider.value] + self.hist.refresh_histogram() + self.hist.rm_high_low_int(None) + + +class BqImViewer_TwoEnergy_High(BqImViewer_Projections_Parent): + def 
__init__(self): + super().__init__() + # Rectangle selector button + self.rectangle_selector_button.tooltip = ( + "Turn on the rectangular region selector. Select a region here " + "to do phase correlation on." + ) + self.rectangle_selector_on = False + self.rectangle_select(None) + self.viewing = False + + def plot(self, projections): + super().plot(projections) + self.filedir = projections.filedir + self.viewing = True + self.change_buttons() + + def change_buttons(self): + if self.viewer_child.viewing and self.viewing: + self.viewer_child.scale_button.button_style = "info" + self.viewer_child.scale_button.disabled = False + else: + self.viewer_child.scale_button.button_style = "" + self.viewer_child.scale_button.disabled = True + + # Rectangle selector to update projection range + def rectangle_to_px_range(self, *args): + BqImViewer_Projections_Parent.rectangle_to_px_range(self) + self.viewer_child.match_rectangle_selector_range_parent() + + +class BqImViewer_TwoEnergy_Low(BqImViewer_TwoEnergy_High): + def __init__(self, viewer_parent): + super().__init__() + self.viewer_parent = viewer_parent + self.viewer_parent.viewer_child = self + # Rectangle selector button + self.rectangle_selector_button.tooltip = ( + "Turn on the rectangular region selector. Select a region here " + "to do phase correlation on. This will be the moving image." 
+ ) + self.diff_button = Button( + icon="minus", + layout=self.button_layout, + style=self.button_font, + tooltip="Take the difference of the high and low energies.", + disabled=True, + ) + self.link_plotted_projections_button = Button( + icon="unlink", + tooltip="Link to the high energy slider.", + layout=self.button_layout, + style=self.button_font, + ) + self.link_plotted_projections_button.on_click(self.link_plotted_projections) + self.plots_linked = False + self.scale_button = Button( + tooltip="Click this to scale the projections to the higher energy.", + icon="compress", + button_style="", + disabled=True, + layout=self.button_layout, + style=self.button_font, + ) + self.start_button = Button( + disabled=True, + button_style="", + tooltip="Register low energy to high energy images", + icon="fa-running", + layout=self.button_layout, + style=self.button_font, + ) + + self.all_buttons.insert(-2, self.diff_button) + self.all_buttons.insert(-2, self.link_plotted_projections_button) + self.all_buttons.insert(-2, self.scale_button) + self.all_buttons.insert(-2, self.start_button) + self.diff_button.on_click(self.switch_to_diff) + self._disable_diff_callback = True + self.viewing = False + self.diff_on = False + + # Rectangle selector to update projection range + def rectangle_to_px_range(self, *args): + BqImViewer_Projections_Parent.rectangle_to_px_range(self) + self.match_rectangle_selector_range_parent() + + def link_plotted_projections(self, *args): + BqImViewer_Projections_Child.link_plotted_projections(self) + + def match_rectangle_selector_range_parent(self): + selected_x = self.rectangle_selector.selected_x + selected_y = self.rectangle_selector.selected_y + selected_x_par = self.viewer_parent.rectangle_selector.selected_x + selected_y_par = self.viewer_parent.rectangle_selector.selected_y + if selected_x is None: + selected_x = selected_x_par + if selected_y is None: + selected_y = selected_y_par + x_diff_par = selected_x_par[1] - selected_x_par[0] + 
y_diff_par = selected_y_par[1] - selected_y_par[0] + self.rectangle_selector.set_trait( + "selected_x", [selected_x[0], selected_x[0] + x_diff_par] + ) + self.rectangle_selector.set_trait( + "selected_y", [selected_y[0], selected_y[0] + y_diff_par] + ) + + def change_buttons(self): + if self.viewer_parent.viewing and self.viewing: + self.scale_button.button_style = "info" + self.scale_button.disabled = False + else: + self.scale_button.button_style = "" + self.scale_button.disabled = True + + def create_app(self): + + self.button_box = HBox( + self.all_buttons, + layout=self.footer_layout, + ) + footer2 = VBox( + [ + self.button_box, + HBox( + [ + self.status_bar_xrange, + self.status_bar_yrange, + self.status_bar_intensity, + ], + layout=Layout(justify_content="center"), + ), + ] + ) + + footer = VBox([self.footer1, footer2]) + + self.app = VBox([self.header, self.center, footer]) + + def switch_to_diff(self, *args): + if not self.diff_on and not self._disable_diff_callback: + self.images = self.diff_images + self.plotted_image.image = self.images[self.image_index_slider.value] + self.diff_on = True + self._disable_diff_callback = True + self.diff_button.button_style = "success" + self._disable_diff_callback = False + elif not self._disable_diff_callback: + self.images = self.original_images + self.plotted_image.image = self.images[self.image_index_slider.value] + self.diff_on = False + self._disable_diff_callback = True + self.diff_button.button_style = "" + self._disable_diff_callback = False + + +class BqImHist: + def __init__(self, viewer: BqImViewerBase): + self.vmin = np.min(viewer.images) + self.vmax = np.max(viewer.images) + self.init_vmin = None + self.init_vmax = None + self.viewer = viewer + self.viewer.rm_high_low_int_button.on_click(self.rm_high_low_int) + self.fig = bq.Figure( + padding=0, + fig_margin=dict(top=0, bottom=0, left=0, right=0), + ) + self.precomputed_hist = None + self.refresh_histogram() + self.fig.layout.width = "100px" + 
self.fig.layout.height = viewer.fig.layout.height + self.copied_hist = False + + def reset_state(self): + self.vmin = self.init_vmin + self.vmax = self.init_vmax + self.selector.selected = None + self.vmin = float(np.min(self.images)) + self.vmax = float(np.max(self.images)) + self.rm_high_low_int(None) + + def refresh_histogram(self): + if self.precomputed_hist is not None: + if self.viewer.from_hdf or self.copied_hist: + self.refresh_histogram_from_hdf() + else: + self.refresh_histogram_from_downsampled_folder() + self.x_sc = bq.LinearScale( + min=float(self.images_min), max=float(self.images_max) + ) + self.y_sc = bq.LinearScale() + self.fig.scale_x = self.x_sc + self.fig.scale_y = bq.LinearScale() + self.hist = bq.Bars( + x=self.bin_centers, + y=self.frequency, + scales={ + "x": self.x_sc, + "y": self.y_sc, + }, + colors=["dodgerblue"], + opacities=[0.75], + orientation="horizontal", + ) + ind = self.bin_centers < self.vmin + self.frequency[ind] = 0 + self.hist.scales["y"].max = float(np.max(self.frequency)) + + else: + self.refresh_histogram_without_precompute() + self.selector = bq.interacts.BrushIntervalSelector( + orientation="vertical", scale=self.x_sc + ) + self.selector.selected = None + self.selector.observe(self.update_crange_selector, "selected") + self.fig.marks = [self.hist] + self.fig.interaction = self.selector + # self.selector.selected = [self.init_vmin, self.init_vmax] + self.rm_high_low_int(None) + + def refresh_histogram_from_hdf(self): + self.ds_factor = self.viewer.ds_factor + self.ds_dict = self.precomputed_hist + self.bin_centers = self.precomputed_hist[ + self.viewer.projections.hdf_key_bin_centers + ] + self.frequency = self.ds_dict[self.viewer.projections.hdf_key_bin_frequency] + self.images_min = float( + self.ds_dict[self.viewer.projections.hdf_key_image_range][0] + ) + self.images_max = float( + self.ds_dict[self.viewer.projections.hdf_key_image_range][1] + ) + self.vmin = 
float(self.ds_dict[self.viewer.projections.hdf_key_percentile][0]) + self.init_vmin = float(self.vmin) + self.vmax = float(self.ds_dict[self.viewer.projections.hdf_key_percentile][1]) + self.init_vmax = float(self.vmax) + self.ds_factor_num = self.ds_dict[self.viewer.projections.hdf_key_ds_factor] + + def refresh_histogram_from_downsampled_folder(self): + self.bin_centers = self.precomputed_hist[-1]["bin_centers"] + self.frequency = self.precomputed_hist[-1]["frequency"] + self.images_min = float(np.min(self.viewer.images)) + self.images_max = float(np.max(self.viewer.images)) + self.vmin = self.images_min + self.vmax = self.images_max + + def refresh_histogram_without_precompute(self): + self.images_min = float(np.min(self.viewer.images)) + self.images_max = float(np.max(self.viewer.images)) + self.init_vmin, self.init_vmax = np.percentile( + self.viewer.images, q=(0.5, 99.5) + ) + self.vmin = float(self.init_vmin) + self.vmax = float(self.init_vmax) + self.x_sc = bq.LinearScale(min=self.images_min, max=self.images_max) + self.y_sc = bq.LinearScale() + self.fig.scale_x = self.x_sc + self.fig.scale_y = bq.LinearScale() + self.hist = bq.Bins( + sample=self.viewer.images.ravel(), + scales={ + "x": self.x_sc, + "y": self.y_sc, + }, + colors=["dodgerblue"], + opacities=[0.75], + orientation="horizontal", + bins=100, + density=True, + ) + self.bin_centers = self.hist.x + self.frequency = self.hist.y + ind = self.bin_centers < self.images_min + self.frequency[ind] = 0 + self.hist.scales["y"].max = float(np.max(self.frequency)) + + def update_crange_selector(self, *args): + if self.selector.selected is not None: + self.viewer.image_scale["image"].min = self.selector.selected[0] + self.viewer.image_scale["image"].max = self.selector.selected[1] + self.vmin = self.selector.selected[0] + self.vmax = self.selector.selected[1] + + def rm_high_low_int(self, change): + self.vmin, self.vmax = self.init_vmin, self.init_vmax + self.selector.selected = [float(self.vmin), 
float(self.vmax)] + + +class BqImHist_Child(BqImHist): + def __init__(self, viewer, viewer_parent): + super().__init__(viewer) + self.viewer_parent = viewer_parent + + def copy_parent_hist(self): + self.precomputed_hist = copy.copy(self.viewer_parent.hist.precomputed_hist) + self.copied_hist = True + # self.selector = bq.interacts.BrushIntervalSelector( + # orientation="vertical", + # scale=self.viewer_parent.hist.x_sc, + # ) + # self.selector.observe(self.update_crange_selector, "selected") + # self.fig.interaction = self.selector + # self.fig.marks = [self.hist] + + +class ScaleBar: + def __init__(self): + pass + + # attempt to make scale bar. try again later 02/05/2022 + # import bqplot as bq + + # sc_x = LinearScale(min=0, max=1) + # sc_y = LinearScale(min=1, max=0) + + # pxX = 1024 + # px_size = 30 + # px_per_micron = 1000 / px_size + # px_per_micron_half = px_per_micron / 2 + # x_px_center = 0.85 * pxX + # num_microns = 5 + # x_coord_1 = x_px_center - px_per_micron_half * num_microns + # x_coord_2 = x_px_center + px_per_micron_half * num_microns + + # x_line_px = np.array( + # [x_coord_1, x_coord_2, x_coord_2, x_coord_1], dtype=np.float32 + # ) + # x_line_px_scaled = x_line_px / pxX + # x_line = [[0.65, 0.7, 0.7, 0.65]] + # y_line = np.array([0.85, 0.85, 0.9, 0.9]) + + # patch = Lines( + # x=x_line_px_scaled, + # y=y_line, + # fill_colors=["white"], + # fill="inside", + # stroke_width=0, + # close_path=True, + # scales={"x": sc_x, "y": sc_y}, + # ) + + # label_text = [f"{num_microns} μm"] + # label_pos_x = (x_line_px_scaled[0] + x_line_px_scaled[1]) / 2 - 0.08 + # label_pos_y = y_line[0] - 0.07 + # test_label = bq.Label( + # x=[label_pos_x], + # y=[label_pos_y], + # scales={"x": sc_x, "y": sc_y}, + # text=label_text, + # default_size=30, + # font_weight="bolder", + # colors="white", + # update_on_move=True, + # ) + + # dimensions = ("550px", "550px") + + # fig = Figure( + # marks=[altered_viewer.plotted_image, patch, test_label], + # animation_duration=1000, 
+ # ) + # fig.layout.height = dimensions[1] + # fig.layout.width = dimensions[0] + # fig diff --git a/tomopyui_app.ipynb b/tomopyui_app.ipynb index 51e15ee..3259bbc 100644 --- a/tomopyui_app.ipynb +++ b/tomopyui_app.ipynb @@ -2,19 +2,57 @@ "cells": [ { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "2ab4bfd4-e0b6-41f1-99b8-8b5a77fa0e29", - "metadata": {}, - "outputs": [], + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Warning: dxfile module not found\n", + "Warning: netCDF4 module not found\n", + "spefile module not found\n", + "netCDF4 module not found\n", + "EdfFile module not found\n", + "spefile module not found\n", + "netCDF4 module not found\n", + "EdfFile module not found\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "87fc75c80d874707afb1e9e8577e70c5", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Output()" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "%reload_ext autoreload\n", "%autoreload 2\n", - "%matplotlib ipympl\n", "import tomopyui.widgets.main as main\n", - "\n", - "dashboard, file_import, center, prep, align, recon = main.create_dashboard()\n", - "dashboard" + "dashboard_output, dashboard, file_import, prep, center, align, recon, dataexplorer = main.create_dashboard(\"APS\") # can be \"SSRL_62C\", \"ALS_832\", \"APS\"\n", + "dashboard_output" ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3e464f3e-100e-4017-b35d-f3f54124af64", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { @@ -33,7 +71,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.9" + "version": "3.9.7" } }, "nbformat": 4, diff --git a/tomopyui_app2.ipynb b/tomopyui_app2.ipynb new file mode 100644 index 0000000..ac18945 --- /dev/null +++ b/tomopyui_app2.ipynb @@ -0,0 +1,177 
@@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "2ab4bfd4-e0b6-41f1-99b8-8b5a77fa0e29", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "60f1d9db21074124abb634ce1a17d9e9", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Output()" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "%reload_ext autoreload\n", + "%autoreload 2\n", + "import tomopyui.widgets.main as main\n", + "dashboard_output, dashboard, file_import, prep, center, align, recon, dataexplorer = main.create_dashboard(\"SSRL_62C\") # can be \"SSRL_62C\", \"ALS_832\", \"APS\"\n", + "dashboard_output" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "ea29f292-6c29-4651-a02f-a664593ee919", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "h5py._hl.dataset.Dataset" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "type(file_import.projections.data)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "cdf05c43-9306-4e26-8d30-1a1a659207c9", + "metadata": {}, + "outputs": [], + "source": [ + "import tomopyui.widgets.prep" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f7588770-2171-41e4-99d8-4e5f1ecdc1d1", + "metadata": {}, + "outputs": [], + "source": [ + "file_import.raw_uploader.projections.scan_info[\"PROJECTION_METADATA\"]" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "f6cd6668-3fa5-4e22-844e-ae3cf52e0fd7", + "metadata": {}, + "outputs": [], + "source": [ + "import tomopyui.backend.io\n", + "import tomopyui.widgets.imports\n", + "import tomopyui.widgets.view" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "ff8ba9ff-5618-4ee6-bcb3-e7e33328a6dc", + "metadata": {}, + "outputs": [], + "source": [ + "import tomopyui.backend.runanalysis\n", + 
"import tomopyui.widgets.analysis\n", + "import tomopyui.tomocupy.prep.alignment" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "2a0b76d4-3f32-4319-a222-9487fed7e21a", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "WindowsPath('D:/20220422_Welborn/fast_goodparticle_TOMO-XANES_220422_220825/08000.00eV')" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "file_import.raw_uploader.projections.filedir" + ] + }, + { + "cell_type": "code", + "execution_count": 53, + "id": "29fe8ba0-20a3-48a4-b7ab-d48b424891a2", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "10" + ] + }, + "execution_count": 53, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import dxchange.reader\n", + "from tomopyui.backend.util.dxchange.reader import read_ole_metadata, read_xrm, read_txrm\n", + "# file_import.raw_uploader.projections.read_xrms_metadata([r\"Y:\\202204\\20220422_Welborn\\13_5_1C_to4p5V_from_4_3time\\20220423_0109_ref_7817eV.xrm\"])\n", + "x, y = read_txrm(r\"Y:\\202204\\20220422_Welborn\\13_5_1C_to4p5V_from_4_3time\\particle_below_above_in_middleofedge_TOMO-XANES_220423_141202\\ref_particle_below_above_in_middleofedge_idx00002_-180.00deg_07797.00eV_average010.txrm\")\n", + "\n", + "len(x)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "00f61d4d-6f54-4bb4-a2a0-74a9e782298b", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +}