From 8c1efc826c768168357e8938544476011afb1192 Mon Sep 17 00:00:00 2001 From: IkaKes Date: Wed, 11 Jun 2025 09:33:25 +0000 Subject: [PATCH 01/11] The directory structure and the main notebook --- Makefile | 144 ++++ docs/Makefile | 153 ++++ docs/commands.rst | 10 + docs/conf.py | 244 +++++++ docs/getting-started.rst | 6 + docs/index.rst | 24 + docs/make.bat | 190 +++++ models/.gitkeep | 0 notebooks/.gitkeep | 0 notebooks/test.ipynb | 1212 ++++++++++++++++++++++++++++++++ references/.gitkeep | 0 reports/.gitkeep | 0 reports/figures/.gitkeep | 0 requirements.txt | 10 + setup.py | 10 + src/__init__.py | 0 src/data/.gitkeep | 0 src/data/__init__.py | 0 src/data/make_dataset.py | 30 + src/features/.gitkeep | 0 src/features/__init__.py | 0 src/features/build_features.py | 0 src/models/.gitkeep | 0 src/models/__init__.py | 0 src/models/predict_model.py | 0 src/models/train_model.py | 0 src/visualization/.gitkeep | 0 src/visualization/__init__.py | 0 src/visualization/visualize.py | 0 test_environment.py | 25 + tox.ini | 3 + 31 files changed, 2061 insertions(+) create mode 100644 Makefile create mode 100644 docs/Makefile create mode 100644 docs/commands.rst create mode 100644 docs/conf.py create mode 100644 docs/getting-started.rst create mode 100644 docs/index.rst create mode 100644 docs/make.bat create mode 100644 models/.gitkeep create mode 100644 notebooks/.gitkeep create mode 100644 notebooks/test.ipynb create mode 100644 references/.gitkeep create mode 100644 reports/.gitkeep create mode 100644 reports/figures/.gitkeep create mode 100644 requirements.txt create mode 100644 setup.py create mode 100644 src/__init__.py create mode 100644 src/data/.gitkeep create mode 100644 src/data/__init__.py create mode 100644 src/data/make_dataset.py create mode 100644 src/features/.gitkeep create mode 100644 src/features/__init__.py create mode 100644 src/features/build_features.py create mode 100644 src/models/.gitkeep create mode 100644 src/models/__init__.py create mode 100644 
src/models/predict_model.py create mode 100644 src/models/train_model.py create mode 100644 src/visualization/.gitkeep create mode 100644 src/visualization/__init__.py create mode 100644 src/visualization/visualize.py create mode 100644 test_environment.py create mode 100644 tox.ini diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..71a2029 --- /dev/null +++ b/Makefile @@ -0,0 +1,144 @@ +.PHONY: clean data lint requirements sync_data_to_s3 sync_data_from_s3 + +################################################################################# +# GLOBALS # +################################################################################# + +PROJECT_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST)))) +BUCKET = [OPTIONAL] your-bucket-for-syncing-data (do not include 's3://') +PROFILE = default +PROJECT_NAME = project_name +PYTHON_INTERPRETER = python3 + +ifeq (,$(shell which conda)) +HAS_CONDA=False +else +HAS_CONDA=True +endif + +################################################################################# +# COMMANDS # +################################################################################# + +## Install Python Dependencies +requirements: test_environment + $(PYTHON_INTERPRETER) -m pip install -U pip setuptools wheel + $(PYTHON_INTERPRETER) -m pip install -r requirements.txt + +## Make Dataset +data: requirements + $(PYTHON_INTERPRETER) src/data/make_dataset.py data/raw data/processed + +## Delete all compiled Python files +clean: + find . -type f -name "*.py[co]" -delete + find . 
-type d -name "__pycache__" -delete + +## Lint using flake8 +lint: + flake8 src + +## Upload Data to S3 +sync_data_to_s3: +ifeq (default,$(PROFILE)) + aws s3 sync data/ s3://$(BUCKET)/data/ +else + aws s3 sync data/ s3://$(BUCKET)/data/ --profile $(PROFILE) +endif + +## Download Data from S3 +sync_data_from_s3: +ifeq (default,$(PROFILE)) + aws s3 sync s3://$(BUCKET)/data/ data/ +else + aws s3 sync s3://$(BUCKET)/data/ data/ --profile $(PROFILE) +endif + +## Set up python interpreter environment +create_environment: +ifeq (True,$(HAS_CONDA)) + @echo ">>> Detected conda, creating conda environment." +ifeq (3,$(findstring 3,$(PYTHON_INTERPRETER))) + conda create --name $(PROJECT_NAME) python=3 +else + conda create --name $(PROJECT_NAME) python=2.7 +endif + @echo ">>> New conda env created. Activate with:\nsource activate $(PROJECT_NAME)" +else + $(PYTHON_INTERPRETER) -m pip install -q virtualenv virtualenvwrapper + @echo ">>> Installing virtualenvwrapper if not already installed.\nMake sure the following lines are in shell startup file\n\ + export WORKON_HOME=$$HOME/.virtualenvs\nexport PROJECT_HOME=$$HOME/Devel\nsource /usr/local/bin/virtualenvwrapper.sh\n" + @bash -c "source `which virtualenvwrapper.sh`;mkvirtualenv $(PROJECT_NAME) --python=$(PYTHON_INTERPRETER)" + @echo ">>> New virtualenv created. 
Activate with:\nworkon $(PROJECT_NAME)" +endif + +## Test python environment is setup correctly +test_environment: + $(PYTHON_INTERPRETER) test_environment.py + +################################################################################# +# PROJECT RULES # +################################################################################# + + + +################################################################################# +# Self Documenting Commands # +################################################################################# + +.DEFAULT_GOAL := help + +# Inspired by +# sed script explained: +# /^##/: +# * save line in hold space +# * purge line +# * Loop: +# * append newline + line to hold space +# * go to next line +# * if line starts with doc comment, strip comment character off and loop +# * remove target prerequisites +# * append hold space (+ newline) to line +# * replace newline plus comments by `---` +# * print line +# Separate expressions are necessary because labels cannot be delimited by +# semicolon; see +.PHONY: help +help: + @echo "$$(tput bold)Available rules:$$(tput sgr0)" + @echo + @sed -n -e "/^## / { \ + h; \ + s/.*//; \ + :doc" \ + -e "H; \ + n; \ + s/^## //; \ + t doc" \ + -e "s/:.*//; \ + G; \ + s/\\n## /---/; \ + s/\\n/ /g; \ + p; \ + }" ${MAKEFILE_LIST} \ + | LC_ALL='C' sort --ignore-case \ + | awk -F '---' \ + -v ncol=$$(tput cols) \ + -v indent=19 \ + -v col_on="$$(tput setaf 6)" \ + -v col_off="$$(tput sgr0)" \ + '{ \ + printf "%s%*s%s ", col_on, -indent, $$1, col_off; \ + n = split($$2, words, " "); \ + line_length = ncol - indent; \ + for (i = 1; i <= n; i++) { \ + line_length -= length(words[i]) + 1; \ + if (line_length <= 0) { \ + line_length = ncol - indent - length(words[i]) - 1; \ + printf "\n%*s ", -indent, " "; \ + } \ + printf "%s ", words[i]; \ + } \ + printf "\n"; \ + }' \ + | more $(shell test $(shell uname) = Darwin && echo '--no-init --raw-control-chars') diff --git a/docs/Makefile b/docs/Makefile new file 
mode 100644 index 0000000..56a5c29 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,153 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = _build + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . + +.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + -rm -rf $(BUILDDIR)/* + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. 
The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/project_name.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/project_name.qhc" + +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." + @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/project_name" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/project_name" + @echo "# devhelp" + +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." 
+ +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." + +man: + $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man + @echo + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +texinfo: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." + +info: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." + +gettext: + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." diff --git a/docs/commands.rst b/docs/commands.rst new file mode 100644 index 0000000..2d162f3 --- /dev/null +++ b/docs/commands.rst @@ -0,0 +1,10 @@ +Commands +======== + +The Makefile contains the central entry points for common tasks related to this project. 
+ +Syncing data to S3 +^^^^^^^^^^^^^^^^^^ + +* `make sync_data_to_s3` will use `aws s3 sync` to recursively sync files in `data/` up to `s3://[OPTIONAL] your-bucket-for-syncing-data (do not include 's3://')/data/`. +* `make sync_data_from_s3` will use `aws s3 sync` to recursively sync files from `s3://[OPTIONAL] your-bucket-for-syncing-data (do not include 's3://')/data/` to `data/`. diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 0000000..87b240c --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,244 @@ +# -*- coding: utf-8 -*- +# +# project_name documentation build configuration file, created by +# sphinx-quickstart. +# +# This file is execfile()d with the current directory set to its containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import os +import sys + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ----------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = [] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +# source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. 
+project = u'project_name' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '0.1' +# The full version, including alpha/beta/rc tags. +release = '0.1' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ['_build'] + +# The reST default role (used for this markup: `text`) to use for all documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'default' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. 
+# html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +# html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_domain_indices = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 
+# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'project_namedoc' + + +# -- Options for LaTeX output -------------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # 'preamble': '', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass [howto/manual]). +latex_documents = [ + ('index', + 'project_name.tex', + u'project_name Documentation', + u"Your name (or your organization/company/team)", 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output -------------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). 
+man_pages = [ + ('index', 'project_name', u'project_name Documentation', + [u"Your name (or your organization/company/team)"], 1) +] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------------ + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ('index', 'project_name', u'project_name Documentation', + u"Your name (or your organization/company/team)", 'project_name', + 'A short description of the project.', 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' diff --git a/docs/getting-started.rst b/docs/getting-started.rst new file mode 100644 index 0000000..b4f71c3 --- /dev/null +++ b/docs/getting-started.rst @@ -0,0 +1,6 @@ +Getting started +=============== + +This is where you describe how to get set up on a clean install, including the +commands necessary to get the raw data (using the `sync_data_from_s3` command, +for example), and then how to make the cleaned, final data sets. diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 0000000..3302c62 --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,24 @@ +.. project_name documentation master file, created by + sphinx-quickstart. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +project_name documentation! +============================================== + +Contents: + +.. 
toctree:: + :maxdepth: 2 + + getting-started + commands + + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/make.bat b/docs/make.bat new file mode 100644 index 0000000..b9cc86d --- /dev/null +++ b/docs/make.bat @@ -0,0 +1,190 @@ +@ECHO OFF + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set BUILDDIR=_build +set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . +set I18NSPHINXOPTS=%SPHINXOPTS% . +if NOT "%PAPER%" == "" ( + set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% + set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% +) + +if "%1" == "" goto help + +if "%1" == "help" ( + :help + echo.Please use `make ^` where ^ is one of + echo. html to make standalone HTML files + echo. dirhtml to make HTML files named index.html in directories + echo. singlehtml to make a single large HTML file + echo. pickle to make pickle files + echo. json to make JSON files + echo. htmlhelp to make HTML files and a HTML help project + echo. qthelp to make HTML files and a qthelp project + echo. devhelp to make HTML files and a Devhelp project + echo. epub to make an epub + echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter + echo. text to make text files + echo. man to make manual pages + echo. texinfo to make Texinfo files + echo. gettext to make PO message catalogs + echo. changes to make an overview over all changed/added/deprecated items + echo. linkcheck to check all external links for integrity + echo. doctest to run all doctests embedded in the documentation if enabled + goto end +) + +if "%1" == "clean" ( + for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i + del /q /s %BUILDDIR%\* + goto end +) + +if "%1" == "html" ( + %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/html. 
+ goto end +) + +if "%1" == "dirhtml" ( + %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. + goto end +) + +if "%1" == "singlehtml" ( + %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. + goto end +) + +if "%1" == "pickle" ( + %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can process the pickle files. + goto end +) + +if "%1" == "json" ( + %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can process the JSON files. + goto end +) + +if "%1" == "htmlhelp" ( + %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can run HTML Help Workshop with the ^ +.hhp project file in %BUILDDIR%/htmlhelp. + goto end +) + +if "%1" == "qthelp" ( + %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can run "qcollectiongenerator" with the ^ +.qhcp project file in %BUILDDIR%/qthelp, like this: + echo.^> qcollectiongenerator %BUILDDIR%\qthelp\project_name.qhcp + echo.To view the help file: + echo.^> assistant -collectionFile %BUILDDIR%\qthelp\project_name.ghc + goto end +) + +if "%1" == "devhelp" ( + %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. + goto end +) + +if "%1" == "epub" ( + %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The epub file is in %BUILDDIR%/epub. + goto end +) + +if "%1" == "latex" ( + %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + if errorlevel 1 exit /b 1 + echo. 
+ echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. + goto end +) + +if "%1" == "text" ( + %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The text files are in %BUILDDIR%/text. + goto end +) + +if "%1" == "man" ( + %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The manual pages are in %BUILDDIR%/man. + goto end +) + +if "%1" == "texinfo" ( + %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. + goto end +) + +if "%1" == "gettext" ( + %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The message catalogs are in %BUILDDIR%/locale. + goto end +) + +if "%1" == "changes" ( + %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes + if errorlevel 1 exit /b 1 + echo. + echo.The overview file is in %BUILDDIR%/changes. + goto end +) + +if "%1" == "linkcheck" ( + %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck + if errorlevel 1 exit /b 1 + echo. + echo.Link check complete; look for any errors in the above output ^ +or in %BUILDDIR%/linkcheck/output.txt. + goto end +) + +if "%1" == "doctest" ( + %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest + if errorlevel 1 exit /b 1 + echo. + echo.Testing of doctests in the sources finished, look at the ^ +results in %BUILDDIR%/doctest/output.txt. 
+ goto end +) + +:end diff --git a/models/.gitkeep b/models/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/notebooks/.gitkeep b/notebooks/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/notebooks/test.ipynb b/notebooks/test.ipynb new file mode 100644 index 0000000..d6d7628 --- /dev/null +++ b/notebooks/test.ipynb @@ -0,0 +1,1212 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "cb8fc6f9-32ba-4e84-8eb4-013b1eb99416", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Requirement already satisfied: numpy in /opt/conda/lib/python3.11/site-packages (1.24.4)\n", + "Requirement already satisfied: scipy in /opt/conda/lib/python3.11/site-packages (1.11.3)\n", + "Requirement already satisfied: pandas in /opt/conda/lib/python3.11/site-packages (2.1.1)\n", + "Requirement already satisfied: matplotlib in /opt/conda/lib/python3.11/site-packages (3.8.0)\n", + "Requirement already satisfied: scikit-learn in /opt/conda/lib/python3.11/site-packages (1.3.1)\n", + "Requirement already satisfied: python-dateutil>=2.8.2 in /opt/conda/lib/python3.11/site-packages (from pandas) (2.8.2)\n", + "Requirement already satisfied: pytz>=2020.1 in /opt/conda/lib/python3.11/site-packages (from pandas) (2023.3.post1)\n", + "Requirement already satisfied: tzdata>=2022.1 in /opt/conda/lib/python3.11/site-packages (from pandas) (2023.3)\n", + "Requirement already satisfied: contourpy>=1.0.1 in /opt/conda/lib/python3.11/site-packages (from matplotlib) (1.1.1)\n", + "Requirement already satisfied: cycler>=0.10 in /opt/conda/lib/python3.11/site-packages (from matplotlib) (0.12.1)\n", + "Requirement already satisfied: fonttools>=4.22.0 in /opt/conda/lib/python3.11/site-packages (from matplotlib) (4.43.1)\n", + "Requirement already satisfied: kiwisolver>=1.0.1 in /opt/conda/lib/python3.11/site-packages (from matplotlib) (1.4.5)\n", + "Requirement already satisfied: packaging>=20.0 in 
/opt/conda/lib/python3.11/site-packages (from matplotlib) (23.2)\n", + "Requirement already satisfied: pillow>=6.2.0 in /opt/conda/lib/python3.11/site-packages (from matplotlib) (10.1.0)\n", + "Requirement already satisfied: pyparsing>=2.3.1 in /opt/conda/lib/python3.11/site-packages (from matplotlib) (3.1.1)\n", + "Requirement already satisfied: joblib>=1.1.1 in /opt/conda/lib/python3.11/site-packages (from scikit-learn) (1.3.2)\n", + "Requirement already satisfied: threadpoolctl>=2.0.0 in /opt/conda/lib/python3.11/site-packages (from scikit-learn) (3.2.0)\n", + "Requirement already satisfied: six>=1.5 in /opt/conda/lib/python3.11/site-packages (from python-dateutil>=2.8.2->pandas) (1.16.0)\n" + ] + } + ], + "source": [ + "# 1) \n", + "!pip install numpy scipy pandas matplotlib scikit-learn" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "e4067844-7cd2-466d-bec5-dd65eeecdb02", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Looking in indexes: https://pypi.org/simple, https://download.pytorch.org/whl/cu124\n", + "Collecting torch==2.4.0+cu124\n", + " Using cached https://download.pytorch.org/whl/cu124/torch-2.4.0%2Bcu124-cp311-cp311-linux_x86_64.whl (797.3 MB)\n", + "Collecting torchvision==0.19.0+cu124\n", + " Using cached https://download.pytorch.org/whl/cu124/torchvision-0.19.0%2Bcu124-cp311-cp311-linux_x86_64.whl (7.1 MB)\n", + "Collecting torchaudio==2.4.0+cu124\n", + " Using cached https://download.pytorch.org/whl/cu124/torchaudio-2.4.0%2Bcu124-cp311-cp311-linux_x86_64.whl (3.4 MB)\n", + "Collecting filelock (from torch==2.4.0+cu124)\n", + " Using cached filelock-3.18.0-py3-none-any.whl.metadata (2.9 kB)\n", + "Requirement already satisfied: typing-extensions>=4.8.0 in /opt/conda/lib/python3.11/site-packages (from torch==2.4.0+cu124) (4.8.0)\n", + "Requirement already satisfied: sympy in /opt/conda/lib/python3.11/site-packages (from torch==2.4.0+cu124) (1.12)\n", + "Requirement already 
satisfied: networkx in /opt/conda/lib/python3.11/site-packages (from torch==2.4.0+cu124) (3.2)\n", + "Requirement already satisfied: jinja2 in /opt/conda/lib/python3.11/site-packages (from torch==2.4.0+cu124) (3.1.2)\n", + "Requirement already satisfied: fsspec in /opt/conda/lib/python3.11/site-packages (from torch==2.4.0+cu124) (2023.9.2)\n", + "Collecting nvidia-cuda-nvrtc-cu12==12.4.99 (from torch==2.4.0+cu124)\n", + " Using cached https://download.pytorch.org/whl/cu124/nvidia_cuda_nvrtc_cu12-12.4.99-py3-none-manylinux2014_x86_64.whl (24.7 MB)\n", + "Collecting nvidia-cuda-runtime-cu12==12.4.99 (from torch==2.4.0+cu124)\n", + " Using cached https://download.pytorch.org/whl/cu124/nvidia_cuda_runtime_cu12-12.4.99-py3-none-manylinux2014_x86_64.whl (883 kB)\n", + "Collecting nvidia-cuda-cupti-cu12==12.4.99 (from torch==2.4.0+cu124)\n", + " Using cached https://download.pytorch.org/whl/cu124/nvidia_cuda_cupti_cu12-12.4.99-py3-none-manylinux2014_x86_64.whl (13.8 MB)\n", + "Collecting nvidia-cudnn-cu12==9.1.0.70 (from torch==2.4.0+cu124)\n", + " Using cached https://download.pytorch.org/whl/cu124/nvidia_cudnn_cu12-9.1.0.70-py3-none-manylinux2014_x86_64.whl (664.8 MB)\n", + "Collecting nvidia-cublas-cu12==12.4.2.65 (from torch==2.4.0+cu124)\n", + " Using cached https://download.pytorch.org/whl/cu124/nvidia_cublas_cu12-12.4.2.65-py3-none-manylinux2014_x86_64.whl (363.0 MB)\n", + "Collecting nvidia-cufft-cu12==11.2.0.44 (from torch==2.4.0+cu124)\n", + " Using cached https://download.pytorch.org/whl/cu124/nvidia_cufft_cu12-11.2.0.44-py3-none-manylinux2014_x86_64.whl (211.5 MB)\n", + "Collecting nvidia-curand-cu12==10.3.5.119 (from torch==2.4.0+cu124)\n", + " Using cached https://download.pytorch.org/whl/cu124/nvidia_curand_cu12-10.3.5.119-py3-none-manylinux2014_x86_64.whl (56.3 MB)\n", + "Collecting nvidia-cusolver-cu12==11.6.0.99 (from torch==2.4.0+cu124)\n", + " Using cached 
https://download.pytorch.org/whl/cu124/nvidia_cusolver_cu12-11.6.0.99-py3-none-manylinux2014_x86_64.whl (128.4 MB)\n", + "Collecting nvidia-cusparse-cu12==12.3.0.142 (from torch==2.4.0+cu124)\n", + " Using cached https://download.pytorch.org/whl/cu124/nvidia_cusparse_cu12-12.3.0.142-py3-none-manylinux2014_x86_64.whl (207.5 MB)\n", + "Collecting nvidia-nccl-cu12==2.20.5 (from torch==2.4.0+cu124)\n", + " Using cached https://download.pytorch.org/whl/cu124/nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_x86_64.whl (176.2 MB)\n", + "Collecting nvidia-nvtx-cu12==12.4.99 (from torch==2.4.0+cu124)\n", + " Using cached https://download.pytorch.org/whl/cu124/nvidia_nvtx_cu12-12.4.99-py3-none-manylinux2014_x86_64.whl (99 kB)\n", + "Collecting nvidia-nvjitlink-cu12==12.4.99 (from torch==2.4.0+cu124)\n", + " Using cached https://download.pytorch.org/whl/cu124/nvidia_nvjitlink_cu12-12.4.99-py3-none-manylinux2014_x86_64.whl (21.1 MB)\n", + "Collecting triton==3.0.0 (from torch==2.4.0+cu124)\n", + " Using cached https://download.pytorch.org/whl/triton-3.0.0-1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (209.4 MB)\n", + "Requirement already satisfied: numpy in /opt/conda/lib/python3.11/site-packages (from torchvision==0.19.0+cu124) (1.24.4)\n", + "Requirement already satisfied: pillow!=8.3.*,>=5.3.0 in /opt/conda/lib/python3.11/site-packages (from torchvision==0.19.0+cu124) (10.1.0)\n", + "Requirement already satisfied: MarkupSafe>=2.0 in /opt/conda/lib/python3.11/site-packages (from jinja2->torch==2.4.0+cu124) (2.1.3)\n", + "Requirement already satisfied: mpmath>=0.19 in /opt/conda/lib/python3.11/site-packages (from sympy->torch==2.4.0+cu124) (1.3.0)\n", + "Using cached filelock-3.18.0-py3-none-any.whl (16 kB)\n", + "Installing collected packages: nvidia-nvtx-cu12, nvidia-nvjitlink-cu12, nvidia-nccl-cu12, nvidia-curand-cu12, nvidia-cufft-cu12, nvidia-cuda-runtime-cu12, nvidia-cuda-nvrtc-cu12, nvidia-cuda-cupti-cu12, nvidia-cublas-cu12, filelock, triton, 
nvidia-cusparse-cu12, nvidia-cudnn-cu12, nvidia-cusolver-cu12, torch, torchvision, torchaudio\n", + "Successfully installed filelock-3.18.0 nvidia-cublas-cu12-12.4.2.65 nvidia-cuda-cupti-cu12-12.4.99 nvidia-cuda-nvrtc-cu12-12.4.99 nvidia-cuda-runtime-cu12-12.4.99 nvidia-cudnn-cu12-9.1.0.70 nvidia-cufft-cu12-11.2.0.44 nvidia-curand-cu12-10.3.5.119 nvidia-cusolver-cu12-11.6.0.99 nvidia-cusparse-cu12-12.3.0.142 nvidia-nccl-cu12-2.20.5 nvidia-nvjitlink-cu12-12.4.99 nvidia-nvtx-cu12-12.4.99 torch-2.4.0+cu124 torchaudio-2.4.0+cu124 torchvision-0.19.0+cu124 triton-3.0.0\n" + ] + } + ], + "source": [ + "# 2) \n", + "!pip install \\\n", + " torch==2.4.0+cu124 \\\n", + " torchvision==0.19.0+cu124 \\\n", + " torchaudio==2.4.0+cu124 \\\n", + " --extra-index-url https://download.pytorch.org/whl/cu124" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "8400c224-a0bb-4521-a47f-32ae7fdfa2e1", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Looking in links: https://data.pyg.org/whl/torch-2.4.0+cu124.html\n", + "Collecting pyg_lib\n", + " Using cached https://data.pyg.org/whl/torch-2.4.0%2Bcu124/pyg_lib-0.4.0%2Bpt24cu124-cp311-cp311-linux_x86_64.whl (2.5 MB)\n", + "Collecting torch_scatter==2.1.2\n", + " Using cached https://data.pyg.org/whl/torch-2.4.0%2Bcu124/torch_scatter-2.1.2%2Bpt24cu124-cp311-cp311-linux_x86_64.whl (10.7 MB)\n", + "Collecting torch_sparse==0.6.17\n", + " Using cached torch_sparse-0.6.17-cp311-cp311-linux_x86_64.whl\n", + "Collecting torch_cluster==1.6.1\n", + " Using cached torch_cluster-1.6.1-cp311-cp311-linux_x86_64.whl\n", + "Collecting torch_spline_conv==1.2.2\n", + " Using cached https://data.pyg.org/whl/torch-2.4.0%2Bcu124/torch_spline_conv-1.2.2%2Bpt24cu124-cp311-cp311-linux_x86_64.whl (995 kB)\n", + "Requirement already satisfied: scipy in /opt/conda/lib/python3.11/site-packages (from torch_sparse==0.6.17) (1.11.3)\n", + "Requirement already satisfied: numpy<1.28.0,>=1.21.6 in 
/opt/conda/lib/python3.11/site-packages (from scipy->torch_sparse==0.6.17) (1.24.4)\n", + "Installing collected packages: torch_spline_conv, torch_scatter, pyg_lib, torch_sparse, torch_cluster\n", + "Successfully installed pyg_lib-0.4.0+pt24cu124 torch_cluster-1.6.1 torch_scatter-2.1.2+pt24cu124 torch_sparse-0.6.17 torch_spline_conv-1.2.2+pt24cu124\n" + ] + } + ], + "source": [ + "# 3) PyG low-level CUDA kernels (must match torch+CUDA)\n", + "!pip install \\\n", + " pyg_lib \\\n", + " torch_scatter==2.1.2 \\\n", + " torch_sparse==0.6.17 \\\n", + " torch_cluster==1.6.1 \\\n", + " torch_spline_conv==1.2.2 \\\n", + " -f https://data.pyg.org/whl/torch-2.4.0+cu124.html\n" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "0ed45568-39d3-4549-8a8e-0a88680fb595", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Collecting torch-geometric==2.3.1\n", + " Using cached torch_geometric-2.3.1-py3-none-any.whl\n", + "Collecting torch-geometric-temporal==0.54.0\n", + " Using cached torch_geometric_temporal-0.54.0-py3-none-any.whl\n", + "Requirement already satisfied: tqdm in /opt/conda/lib/python3.11/site-packages (from torch-geometric==2.3.1) (4.66.1)\n", + "Requirement already satisfied: numpy in /opt/conda/lib/python3.11/site-packages (from torch-geometric==2.3.1) (1.24.4)\n", + "Requirement already satisfied: scipy in /opt/conda/lib/python3.11/site-packages (from torch-geometric==2.3.1) (1.11.3)\n", + "Requirement already satisfied: jinja2 in /opt/conda/lib/python3.11/site-packages (from torch-geometric==2.3.1) (3.1.2)\n", + "Requirement already satisfied: requests in /opt/conda/lib/python3.11/site-packages (from torch-geometric==2.3.1) (2.31.0)\n", + "Requirement already satisfied: pyparsing in /opt/conda/lib/python3.11/site-packages (from torch-geometric==2.3.1) (3.1.1)\n", + "Requirement already satisfied: scikit-learn in /opt/conda/lib/python3.11/site-packages (from torch-geometric==2.3.1) (1.3.1)\n", + 
"Requirement already satisfied: psutil>=5.8.0 in /opt/conda/lib/python3.11/site-packages (from torch-geometric==2.3.1) (5.9.5)\n", + "Collecting decorator==4.4.2 (from torch-geometric-temporal==0.54.0)\n", + " Using cached decorator-4.4.2-py2.py3-none-any.whl.metadata (4.2 kB)\n", + "Requirement already satisfied: torch in /opt/conda/lib/python3.11/site-packages (from torch-geometric-temporal==0.54.0) (2.4.0+cu124)\n", + "Requirement already satisfied: cython in /opt/conda/lib/python3.11/site-packages (from torch-geometric-temporal==0.54.0) (3.0.4)\n", + "Collecting pandas<=1.3.5 (from torch-geometric-temporal==0.54.0)\n", + " Using cached pandas-1.3.5-cp311-cp311-linux_x86_64.whl\n", + "Requirement already satisfied: torch-sparse in /opt/conda/lib/python3.11/site-packages (from torch-geometric-temporal==0.54.0) (0.6.17)\n", + "Requirement already satisfied: torch-scatter in /opt/conda/lib/python3.11/site-packages (from torch-geometric-temporal==0.54.0) (2.1.2+pt24cu124)\n", + "Requirement already satisfied: six in /opt/conda/lib/python3.11/site-packages (from torch-geometric-temporal==0.54.0) (1.16.0)\n", + "Requirement already satisfied: networkx in /opt/conda/lib/python3.11/site-packages (from torch-geometric-temporal==0.54.0) (3.2)\n", + "Requirement already satisfied: python-dateutil>=2.7.3 in /opt/conda/lib/python3.11/site-packages (from pandas<=1.3.5->torch-geometric-temporal==0.54.0) (2.8.2)\n", + "Requirement already satisfied: pytz>=2017.3 in /opt/conda/lib/python3.11/site-packages (from pandas<=1.3.5->torch-geometric-temporal==0.54.0) (2023.3.post1)\n", + "Requirement already satisfied: MarkupSafe>=2.0 in /opt/conda/lib/python3.11/site-packages (from jinja2->torch-geometric==2.3.1) (2.1.3)\n", + "Requirement already satisfied: charset-normalizer<4,>=2 in /opt/conda/lib/python3.11/site-packages (from requests->torch-geometric==2.3.1) (3.3.0)\n", + "Requirement already satisfied: idna<4,>=2.5 in /opt/conda/lib/python3.11/site-packages (from 
requests->torch-geometric==2.3.1) (3.4)\n", + "Requirement already satisfied: urllib3<3,>=1.21.1 in /opt/conda/lib/python3.11/site-packages (from requests->torch-geometric==2.3.1) (2.0.7)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.11/site-packages (from requests->torch-geometric==2.3.1) (2023.7.22)\n", + "Requirement already satisfied: joblib>=1.1.1 in /opt/conda/lib/python3.11/site-packages (from scikit-learn->torch-geometric==2.3.1) (1.3.2)\n", + "Requirement already satisfied: threadpoolctl>=2.0.0 in /opt/conda/lib/python3.11/site-packages (from scikit-learn->torch-geometric==2.3.1) (3.2.0)\n", + "Requirement already satisfied: filelock in /opt/conda/lib/python3.11/site-packages (from torch->torch-geometric-temporal==0.54.0) (3.18.0)\n", + "Requirement already satisfied: typing-extensions>=4.8.0 in /opt/conda/lib/python3.11/site-packages (from torch->torch-geometric-temporal==0.54.0) (4.8.0)\n", + "Requirement already satisfied: sympy in /opt/conda/lib/python3.11/site-packages (from torch->torch-geometric-temporal==0.54.0) (1.12)\n", + "Requirement already satisfied: fsspec in /opt/conda/lib/python3.11/site-packages (from torch->torch-geometric-temporal==0.54.0) (2023.9.2)\n", + "Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.4.99 in /opt/conda/lib/python3.11/site-packages (from torch->torch-geometric-temporal==0.54.0) (12.4.99)\n", + "Requirement already satisfied: nvidia-cuda-runtime-cu12==12.4.99 in /opt/conda/lib/python3.11/site-packages (from torch->torch-geometric-temporal==0.54.0) (12.4.99)\n", + "Requirement already satisfied: nvidia-cuda-cupti-cu12==12.4.99 in /opt/conda/lib/python3.11/site-packages (from torch->torch-geometric-temporal==0.54.0) (12.4.99)\n", + "Requirement already satisfied: nvidia-cudnn-cu12==9.1.0.70 in /opt/conda/lib/python3.11/site-packages (from torch->torch-geometric-temporal==0.54.0) (9.1.0.70)\n", + "Requirement already satisfied: nvidia-cublas-cu12==12.4.2.65 in 
/opt/conda/lib/python3.11/site-packages (from torch->torch-geometric-temporal==0.54.0) (12.4.2.65)\n", + "Requirement already satisfied: nvidia-cufft-cu12==11.2.0.44 in /opt/conda/lib/python3.11/site-packages (from torch->torch-geometric-temporal==0.54.0) (11.2.0.44)\n", + "Requirement already satisfied: nvidia-curand-cu12==10.3.5.119 in /opt/conda/lib/python3.11/site-packages (from torch->torch-geometric-temporal==0.54.0) (10.3.5.119)\n", + "Requirement already satisfied: nvidia-cusolver-cu12==11.6.0.99 in /opt/conda/lib/python3.11/site-packages (from torch->torch-geometric-temporal==0.54.0) (11.6.0.99)\n", + "Requirement already satisfied: nvidia-cusparse-cu12==12.3.0.142 in /opt/conda/lib/python3.11/site-packages (from torch->torch-geometric-temporal==0.54.0) (12.3.0.142)\n", + "Requirement already satisfied: nvidia-nccl-cu12==2.20.5 in /opt/conda/lib/python3.11/site-packages (from torch->torch-geometric-temporal==0.54.0) (2.20.5)\n", + "Requirement already satisfied: nvidia-nvtx-cu12==12.4.99 in /opt/conda/lib/python3.11/site-packages (from torch->torch-geometric-temporal==0.54.0) (12.4.99)\n", + "Requirement already satisfied: nvidia-nvjitlink-cu12==12.4.99 in /opt/conda/lib/python3.11/site-packages (from torch->torch-geometric-temporal==0.54.0) (12.4.99)\n", + "Requirement already satisfied: triton==3.0.0 in /opt/conda/lib/python3.11/site-packages (from torch->torch-geometric-temporal==0.54.0) (3.0.0)\n", + "Requirement already satisfied: mpmath>=0.19 in /opt/conda/lib/python3.11/site-packages (from sympy->torch->torch-geometric-temporal==0.54.0) (1.3.0)\n", + "Using cached decorator-4.4.2-py2.py3-none-any.whl (9.2 kB)\n", + "Installing collected packages: decorator, pandas, torch-geometric, torch-geometric-temporal\n", + " Attempting uninstall: decorator\n", + " Found existing installation: decorator 5.1.1\n", + " Uninstalling decorator-5.1.1:\n", + " Successfully uninstalled decorator-5.1.1\n", + " Attempting uninstall: pandas\n", + " Found existing 
installation: pandas 2.1.1\n", + " Uninstalling pandas-2.1.1:\n", + " Successfully uninstalled pandas-2.1.1\n", + "Successfully installed decorator-4.4.2 pandas-1.3.5 torch-geometric-2.3.1 torch-geometric-temporal-0.54.0\n" + ] + } + ], + "source": [ + "# 4) PyG high-level libraries\n", + "!pip install \\\n", + " torch-geometric==2.3.1 \\\n", + " torch-geometric-temporal==0.54.0" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "839a4985-f3cf-45a4-8a06-195069f07153", + "metadata": {}, + "outputs": [], + "source": [ + "import torch, torch_geometric, torch_geometric_temporal as tgt" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "7ab11f93-cdde-4679-a684-d823bc3c0596", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Torch: 2.4.0+cu124 | CUDA: 12.4 | GPU available: True\n", + "PyG: 2.3.1\n", + "PyG-Temporal: 0.54.0\n" + ] + } + ], + "source": [ + "print(\"Torch:\", torch.__version__,\n", + " \"| CUDA:\", torch.version.cuda,\n", + " \"| GPU available:\", torch.cuda.is_available())\n", + "print(\"PyG:\", torch_geometric.__version__)\n", + "print(\"PyG-Temporal:\", tgt.__version__)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "0bf10855-a3c9-450e-a36a-f3ca7be50cbf", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import random\n", + "import numpy as np\n", + "import pandas as pd\n", + "import torch\n", + "import torch.nn as nn\n", + "from torch.utils.data import Dataset, DataLoader, ConcatDataset\n", + "from sklearn.preprocessing import StandardScaler\n", + "from torch.optim.lr_scheduler import OneCycleLR\n", + "import torch_geometric\n", + "import torch_geometric_temporal as tgt\n", + "from torch_geometric.data import HeteroData\n", + "from torch_geometric_temporal.signal import DynamicHeteroGraphTemporalSignal\n", + "from torch_geometric_temporal.nn.hetero import HeteroGCLSTM\n", + "import copy\n", + "import matplotlib.pyplot as plt\n" + 
] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "948f36f9-3243-451c-93c9-aff1dee3038a", + "metadata": {}, + "outputs": [], + "source": [ + "# Global settings:\n", + "seed = 42\n", + "window_size = 10\n", + "n_epochs = 50 \n", + "hidden_dim = 128\n", + "initial_lr = 1e-4\n", + "weight_decay_val = 1e-2\n", + "max_lr_onecycle = 1e-3\n", + "pct_start_onecycle = 0.1\n" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "6d715734-ec80-44be-b07a-a5b7a8d03078", + "metadata": {}, + "outputs": [], + "source": [ + "# Reproducibility:\n", + "random.seed(seed)\n", + "np.random.seed(seed)\n", + "torch.manual_seed(seed)\n", + "if torch.cuda.is_available():\n", + " torch.cuda.manual_seed_all(seed)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "86d9f908-4760-450c-9116-6720536c8842", + "metadata": {}, + "outputs": [], + "source": [ + "# 1) Single measurement:\n", + "def load_and_process_single_measurement(sats_csv_path, receiver_csv_path):\n", + " \"\"\"\n", + " Reads satellite and receiver CSVs and constructs lists for:\n", + " - feature_dicts: [{ 'receiver': (1x2), 'satellite': (n_satx3) }, …]\n", + " - target_dicts: [{ 'receiver': (1x2) }, …]\n", + " - edge_index_dicts: [{ ('receiver','to','satellite'): (2xn_sat), ('satellite','rev_to','receiver'): (2xn_sat) }, …]\n", + " - edge_weight_dicts: [None, None, …]\n", + " - time_steps: [t0, t1, …]\n", + " - additional_sids: [{ 'satellite_s_ids': { 'satellite': array(n_sat,) } }, …]\n", + " \"\"\"\n", + " sats_df_meas = pd.read_csv(sats_csv_path)\n", + " receiver_df_meas = pd.read_csv(receiver_csv_path)\n", + " time_steps_meas = sorted(receiver_df_meas['T_ID'].unique())\n", + " \n", + " feature_dicts_meas = []\n", + " target_dicts_meas = []\n", + " edge_index_dicts_meas = []\n", + " additional_sids_dicts = []\n", + " \n", + " for t_local in time_steps_meas:\n", + " rec = receiver_df_meas[receiver_df_meas['T_ID'] == t_local].iloc[0]\n", + " feat_rec = rec[['Lat', 
'Lon']].to_numpy().reshape(1, 2)\n", + " targ_rec = rec[['LatDev', 'LonDev']].to_numpy().reshape(1, 2)\n", + " \n", + " sats_t = sats_df_meas[sats_df_meas['T_ID'] == t_local].sort_values('S_ID')\n", + " feat_sat = sats_t[['SNR', 'az', 'el']].to_numpy()\n", + " s_ids_sat = sats_t['S_ID'].values.astype(np.int64)\n", + " n_sat = feat_sat.shape[0]\n", + " \n", + " if n_sat > 0 and n_sat != len(s_ids_sat):\n", + " s_ids_sat = s_ids_sat[:n_sat]\n", + " elif n_sat == 0 and len(s_ids_sat) > 0:\n", + " s_ids_sat = np.array([], dtype=np.int64)\n", + " \n", + " src = np.zeros(n_sat, dtype=int)\n", + " dst = np.arange(n_sat, dtype=int)\n", + " edges = np.vstack([src, dst])\n", + " edges_rev = edges[::-1].copy()\n", + " \n", + " feature_dicts_meas.append({\n", + " 'receiver': feat_rec,\n", + " 'satellite': feat_sat\n", + " })\n", + " target_dicts_meas.append({\n", + " 'receiver': targ_rec\n", + " })\n", + " edge_index_dicts_meas.append({\n", + " ('receiver', 'to', 'satellite'): edges,\n", + " ('satellite', 'rev_to', 'receiver'): edges_rev\n", + " })\n", + " additional_sids_dicts.append({\n", + " 'satellite_s_ids': s_ids_sat\n", + " })\n", + " \n", + " edge_weight_dicts_meas = [None] * len(time_steps_meas)\n", + " return (\n", + " feature_dicts_meas,\n", + " target_dicts_meas,\n", + " edge_index_dicts_meas,\n", + " edge_weight_dicts_meas,\n", + " time_steps_meas,\n", + " additional_sids_dicts\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "b109c495-b981-4f23-9a3e-3de9c479d5c1", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "DEBUG: Starting to load and process all measurements...\n", + "Ukupno mjerenja: 50\n", + " Za trening: 25 (IDs: ['R_0', 'R_1', 'R_2', 'R_3', 'R_4', 'R_5', 'R_6', 'R_7', 'R_8', 'R_9', 'R_10', 'R_11', 'R_12', 'R_13', 'R_14', 'R_15', 'R_16', 'R_17', 'R_18', 'R_19', 'R_20', 'R_21', 'R_22', 'R_23', 'R_24'])\n", + " Za test: 25 (IDs: ['R_25', 'R_26', 'R_27', 'R_28', 'R_29', 'R_30', 
'R_31', 'R_32', 'R_33', 'R_34', 'R_35', 'R_36', 'R_37', 'R_38', 'R_39', 'R_40', 'R_41', 'R_42', 'R_43', 'R_44', 'R_45', 'R_46', 'R_47', 'R_48', 'R_49'])\n" + ] + } + ], + "source": [ + "# --- 1) Učitavanje i predobrada više mjerenja ---\n", + "print(\"DEBUG: Starting to load and process all measurements...\")\n", + "\n", + "base_server_path = \"/home/jovyan/shared/Ivana_GNN/Sateliti/parsed/Ublox10/cw/-65/\"\n", + "\n", + "# Programski generiramo definicije za R_0 … R_49\n", + "measurement_definitions = []\n", + "for i in range(50):\n", + " folder = f\"R_{i}\"\n", + " sats_path = os.path.join(base_server_path, folder, \"sats_data.csv\")\n", + " receiver_path = os.path.join(base_server_path, folder, \"reciever_data.csv\")\n", + " measurement_definitions.append({\n", + " \"id\": folder,\n", + " \"sats\": sats_path,\n", + " \"receiver\": receiver_path,\n", + " })\n", + "\n", + "all_measurements_processed = []\n", + "for m_info in measurement_definitions:\n", + " #print(f\"Učitavanje mjerenja: {m_info['id']} (Sats: {m_info['sats']}, Receiver: {m_info['receiver']})\")\n", + " if not (os.path.exists(m_info[\"sats\"]) and os.path.exists(m_info[\"receiver\"])):\n", + " print(f\" UPOZORENJE: Datoteke za {m_info['id']} ne postoje, preskačem.\")\n", + " continue\n", + "\n", + " (\n", + " features,\n", + " targets,\n", + " edges,\n", + " weights,\n", + " times,\n", + " additional_sids_dicts\n", + " ) = load_and_process_single_measurement(\n", + " m_info[\"sats\"], m_info[\"receiver\"]\n", + " )\n", + "\n", + " # Ako nema dovoljno snapshotova, preskoči\n", + " if features is None or len(features) < window_size:\n", + " print(f\" INFO: Premalo snimaka ({len(features) if features else 0}), preskačem.\")\n", + " continue\n", + "\n", + " all_measurements_processed.append({\n", + " \"id\": m_info[\"id\"],\n", + " \"features\": features,\n", + " \"targets\": targets,\n", + " \"edges\": edges,\n", + " \"weights\": weights,\n", + " \"time_steps\": times,\n", + " \"satellite_s_ids\": 
additional_sids_dicts\n", + "    })\n", + "\n", + "if not all_measurements_processed:\n", + "    raise ValueError(\n", + "        \"Nijedno mjerenje nije učitano/obrađeno. Provjerite putanje do datoteka i sadržaj CSV-ova.\"\n", + "    )\n", + "\n", + "# Podjela: prvih 25 za trening, zadnjih 25 za test\n", + "train_measurements_data = all_measurements_processed[:25]\n", + "test_measurements_data = all_measurements_processed[25:]\n", + "\n", + "print(f\"Ukupno mjerenja: {len(all_measurements_processed)}\")\n", + "print(f\"  Za trening: {len(train_measurements_data)} (IDs: {[m['id'] for m in train_measurements_data]})\")\n", + "print(f\"  Za test: {len(test_measurements_data)} (IDs: {[m['id'] for m in test_measurements_data]})\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "a76a33ac-5d37-4800-ad04-6c2693a34569", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "DEBUG: Aggregating raw train features/targets for StandardScaler...\n" + ] + } + ], + "source": [ + "# 3) Aggregation for normalization\n", + "print(\"DEBUG: Aggregating raw train features/targets for StandardScaler...\")\n", + "\n", + "agg_train_rec_feats = []\n", + "agg_train_sat_feats = []\n", + "agg_train_targ_rec = []\n", + "\n", + "for meas_data in train_measurements_data:\n", + "    num_ts = len(meas_data[\"features\"])\n", + "    for i in range(num_ts):\n", + "        fr = meas_data[\"features\"][i]['receiver']\n", + "        agg_train_rec_feats.append(fr)\n", + "\n", + "        fs = meas_data[\"features\"][i]['satellite']\n", + "        if fs.size > 0:\n", + "            agg_train_sat_feats.append(fs)\n", + "\n", + "        tr = meas_data[\"targets\"][i]['receiver']\n", + "        agg_train_targ_rec.append(tr)\n", + "\n", + "if not agg_train_rec_feats:\n", + "    raise ValueError(\"No training data available for normalization statistics.\")\n", + "\n", + "rec_feats_np = np.vstack(agg_train_rec_feats)\n", + "sat_feats_np = np.vstack(agg_train_sat_feats)\n", + "targ_rec_np = np.vstack(agg_train_targ_rec)\n" 
+ ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "6cdbe27f-561a-42e4-9478-2594c0b69f50", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "DEBUG: Fitted StandardScalers on train set.\n" + ] + } + ], + "source": [ + "\n", + "# 4) Fit StandardScalers on TRAINING data \n", + "rec_scaler = StandardScaler().fit(rec_feats_np)\n", + "targ_scaler = StandardScaler().fit(targ_rec_np)\n", + "sat_scaler = StandardScaler().fit(sat_feats_np)\n", + "\n", + "print(\"DEBUG: Fitted StandardScalers on train set.\")\n", + "\n", + "# 5) Normalization function\n", + "def normalize_with_scalers(measurement_data_list, rec_scaler, sat_scaler, targ_scaler):\n", + " normalized_measurements = []\n", + " for meas_data in measurement_data_list:\n", + " norm_feat_dicts = []\n", + " norm_targ_dicts = []\n", + " norm_sids_list = []\n", + "\n", + " num_ts = len(meas_data[\"features\"])\n", + " for i in range(num_ts):\n", + " fr = meas_data[\"features\"][i]['receiver']\n", + " fs = meas_data[\"features\"][i]['satellite']\n", + " sids = meas_data[\"satellite_s_ids\"][i]['satellite_s_ids']\n", + " tr = meas_data[\"targets\"][i]['receiver']\n", + "\n", + " norm_fr = rec_scaler.transform(fr)\n", + " norm_tr = targ_scaler.transform(tr)\n", + "\n", + " if fs.size > 0:\n", + " norm_fs = sat_scaler.transform(fs)\n", + " else:\n", + " norm_fs = fs.copy()\n", + "\n", + " norm_feat_dicts.append({\n", + " 'receiver': norm_fr,\n", + " 'satellite': norm_fs\n", + " })\n", + " norm_targ_dicts.append({\n", + " 'receiver': norm_tr\n", + " })\n", + " norm_sids_list.append({'satellite_s_ids': sids.copy()})\n", + "\n", + " new_meas = {\n", + " **meas_data,\n", + " \"features\": norm_feat_dicts,\n", + " \"targets\": norm_targ_dicts,\n", + " \"satellite_s_ids\": norm_sids_list\n", + " }\n", + " normalized_measurements.append(new_meas)\n", + "\n", + " return normalized_measurements" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": 
"f239b3f0-9993-4318-a951-1e585c815e46", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "DEBUG: Finished StandardScaler normalization on train & test sets.\n" + ] + } + ], + "source": [ + "# 6) Apply normalization to train & test\n", + "normalized_train_measurements = normalize_with_scalers(\n", + " train_measurements_data, rec_scaler, sat_scaler, targ_scaler\n", + ")\n", + "normalized_test_measurements = normalize_with_scalers(\n", + " test_measurements_data, rec_scaler, sat_scaler, targ_scaler\n", + ")\n", + "print(\"DEBUG: Finished StandardScaler normalization on train & test sets.\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "7041841a-a99b-4a1f-bca4-f34bbb4347f9", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "DEBUG: Creating DynamicHeteroGraphTemporalSignal objects...\n", + "DEBUG: Finished creating DynamicHeteroGraphTemporalSignal objects.\n" + ] + } + ], + "source": [ + "# 7) Create DynamicHeteroGraphTemporalSignal objects\n", + "print(\"DEBUG: Creating DynamicHeteroGraphTemporalSignal objects...\")\n", + "\n", + "def create_signals(measurements, split_name):\n", + " signals = []\n", + " for meas_data in measurements:\n", + " signal = DynamicHeteroGraphTemporalSignal(\n", + " edge_index_dicts = meas_data[\"edges\"],\n", + " edge_weight_dicts = meas_data[\"weights\"],\n", + " feature_dicts = meas_data[\"features\"],\n", + " target_dicts = meas_data[\"targets\"],\n", + " satellite_s_ids = meas_data[\"satellite_s_ids\"]\n", + " )\n", + " signals.append(signal)\n", + " #print(f\"Created {split_name} signal {meas_data['id']} (snapshots: {signal.snapshot_count})\")\n", + " return signals\n", + "\n", + "train_signals = create_signals(normalized_train_measurements, \"train\")\n", + "test_signals = create_signals(normalized_test_measurements, \"test\")\n", + "\n", + "print(\"DEBUG: Finished creating DynamicHeteroGraphTemporalSignal 
objects.\")" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "9bbc6a86-db2d-492a-8b16-faf854c798a1", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "DEBUG: Creating sliding window DataLoaders...\n", + "DEBUG: Total training steps: n_epochs=50, num_train_windows_total=700\n", + "DEBUG: Total training steps: n_epochs=50, num_test_windows_total=700\n", + "DEBUG: total_steps = 35000\n" + ] + } + ], + "source": [ + "# 8) Sliding‐window Dataset & DataLoader\n", + "print(\"DEBUG: Creating sliding window DataLoaders...\")\n", + "\n", + "class SlidingWindowDataset(Dataset):\n", + " def __init__(self, signal, window_size, stride=1):\n", + " self.signal = signal\n", + " self.window_size = window_size\n", + " self.stride = stride\n", + "\n", + " def __len__(self):\n", + " # Number of windows given this stride\n", + " return max(0, (self.signal.snapshot_count - self.window_size) // self.stride + 1)\n", + "\n", + " def __getitem__(self, idx):\n", + " start = idx * self.stride\n", + " end = start + self.window_size\n", + " return [self.signal[t] for t in range(start, end)]\n", + "\n", + "def build_loader(signals, shuffle, stride=1):\n", + " datasets = []\n", + " for sig in signals:\n", + " ds = SlidingWindowDataset(sig, window_size, stride=stride)\n", + " if len(ds) > 0:\n", + " datasets.append(ds)\n", + " if not datasets:\n", + " return None\n", + " concat = ConcatDataset(datasets)\n", + " return DataLoader(\n", + " concat,\n", + " batch_size=1,\n", + " shuffle=shuffle,\n", + " collate_fn=lambda batch: batch[0]\n", + " )\n", + "\n", + "# Choose stride window_size for non‐overlapping windows:\n", + "train_loader = build_loader(train_signals, shuffle=True, stride=window_size)\n", + "test_loader = build_loader(test_signals, shuffle=False, stride=window_size)\n", + "'''\n", + "if train_loader is None or len(train_loader.dataset) == 0:\n", + " raise ValueError(\"No training windows after sliding‐window 
split.\")\n", + "print(f\"DEBUG: Train DataLoader created. Total windows: {len(train_loader.dataset)}\")\n", + "\n", + "if test_loader is None or len(test_loader.dataset) == 0:\n", + "    print(\"DEBUG: No test windows available; skipping test DataLoader creation.\")\n", + "else:\n", + "    print(f\"DEBUG: Test DataLoader created. Total windows: {len(test_loader.dataset)}\")\n", + "'''\n", + "num_train_windows_total = len(train_loader.dataset)\n", + "num_test_windows_total = len(test_loader.dataset)\n", + "\n", + "total_steps = n_epochs * num_train_windows_total\n", + "print(f\"DEBUG: Total training steps: n_epochs={n_epochs}, num_train_windows_total={num_train_windows_total}\")\n", + "print(f\"DEBUG: Total training steps: n_epochs={n_epochs}, num_test_windows_total={num_test_windows_total}\")\n", + "\n", + "print(f\"DEBUG: total_steps = {total_steps}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "d55c1f9f-042b-419b-ab45-09e643de25a0", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "DEBUG: Defining run_window function...\n", + "DEBUG: run_window defined.\n" + ] + } + ], + "source": [ + "# 9) Define run_window function\n", + "print(\"DEBUG: Defining run_window function...\")\n", + "\n", + "def run_window(window_snapshots, model, hidden_dim, device):\n", + "    \"\"\"\n", + "    window_snapshots: list of HeteroData objects (length = window_size)\n", + "    model: FullModel (contains GCLSTM + regression head)\n", + "    hidden_dim: int\n", + "    device: torch.device\n", + "\n", + "    Returns:\n", + "        pred_norm: (1x2) tensor\n", + "        true_norm: (1x2) tensor\n", + "    \"\"\"\n", + "    h_state = {'receiver': torch.zeros(hidden_dim, device=device)}\n", + "    c_state = {'receiver': torch.zeros(hidden_dim, device=device)}\n", + "\n", + "    for snapshot in window_snapshots:\n", + "        x_dict_for_gclstm = {\n", + "            'receiver': snapshot.x_dict['receiver'].to(device),\n", + "            'satellite': snapshot.x_dict['satellite'].to(device)\n", + 
" }\n", + " eidx_on_device = {\n", + " rel: snapshot.edge_index_dict[rel].to(device)\n", + " for rel in snapshot.edge_index_dict\n", + " }\n", + "\n", + " rec_h = h_state['receiver'].unsqueeze(0) # (1xhidden_dim)\n", + " rec_c = c_state['receiver'].unsqueeze(0)\n", + "\n", + " s_ids_val = snapshot['satellite_s_ids']['satellite_s_ids']\n", + " if isinstance(s_ids_val, torch.Tensor):\n", + " s_ids_np = s_ids_val.cpu().numpy()\n", + " else:\n", + " s_ids_np = np.array(s_ids_val, dtype=np.int64)\n", + " num_sat = len(s_ids_np)\n", + "\n", + " h_sat = torch.zeros((num_sat, hidden_dim), device=device)\n", + " c_sat = torch.zeros((num_sat, hidden_dim), device=device)\n", + " for j, sid in enumerate(s_ids_np):\n", + " if sid in h_state:\n", + " h_sat[j] = h_state[sid]\n", + " c_sat[j] = c_state[sid]\n", + "\n", + " h_dict_step = {'receiver': rec_h, 'satellite': h_sat}\n", + " c_dict_step = {'receiver': rec_c, 'satellite': c_sat}\n", + "\n", + " h_out, c_out = model.gclstm(x_dict_for_gclstm, eidx_on_device, h_dict_step, c_dict_step)\n", + "\n", + " h_state['receiver'] = h_out['receiver'][0]\n", + " c_state['receiver'] = c_out['receiver'][0]\n", + " for j, sid in enumerate(s_ids_np.tolist()):\n", + " h_state[sid] = h_out['satellite'][j]\n", + " c_state[sid] = c_out['satellite'][j]\n", + "\n", + " h_final = h_state['receiver'].unsqueeze(0) # (1xhidden_dim)\n", + " h_dropped = model.dropout(h_final)\n", + " pred_norm = torch.cat([model.lin_lat(h_dropped), model.lin_lon(h_dropped)], dim=-1) # (1x2)\n", + "\n", + " true_norm = window_snapshots[-1].y_dict['receiver'].to(device) # (1x2)\n", + " return pred_norm, true_norm\n", + "\n", + "print(\"DEBUG: run_window defined.\")" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "9ac8fe60-3b1c-476e-9c92-0f0f1a5fd2b7", + "metadata": {}, + "outputs": [], + "source": [ + "# 10) Define FullModel \n", + "class FullModel(nn.Module):\n", + " def __init__(self, in_channels_dict, hidden_dim, metadata, dropout_rate=0.1):\n", + 
" super().__init__()\n", + " self.gclstm = HeteroGCLSTM(\n", + " in_channels_dict = in_channels_dict,\n", + " out_channels = hidden_dim,\n", + " metadata = metadata\n", + " )\n", + " self.dropout = nn.Dropout(dropout_rate)\n", + " self.lin_lat = nn.Linear(hidden_dim, 1)\n", + " self.lin_lon = nn.Linear(hidden_dim, 1)\n", + "\n", + " def forward(self, x_dict, edge_index_dict, h_dict=None, c_dict=None):\n", + " h_out, c_out = self.gclstm(x_dict, edge_index_dict, h_dict, c_dict)\n", + " h_rec = h_out['receiver'] # (1xhidden_dim)\n", + " h_rec = self.dropout(h_rec)\n", + " coords = torch.cat([self.lin_lat(h_rec), self.lin_lon(h_rec)], dim=-1) # (1x2)\n", + " return coords, h_out, c_out\n" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "d33440c3-4054-40d6-acc0-35fed184a15e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "DEBUG: Using device: cuda\n", + "DEBUG: Model, optimizer, loss_fn, and scheduler are set up.\n" + ] + } + ], + "source": [ + "# 11) Initialize model, optimizer, loss, scheduler\n", + "if not train_signals or train_signals[0].snapshot_count == 0:\n", + " raise ValueError(\"No training signals or first signal is empty for metadata.\")\n", + "\n", + "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", + "print(f\"DEBUG: Using device: {device}\")\n", + "\n", + "model = FullModel(\n", + " in_channels_dict = {'receiver': 2, 'satellite': 3},\n", + " hidden_dim = hidden_dim,\n", + " metadata = train_signals[0][0].metadata()\n", + ").to(device)\n", + "\n", + "optimizer = torch.optim.Adam(\n", + " model.parameters(),\n", + " lr=initial_lr,\n", + " weight_decay=weight_decay_val\n", + ")\n", + "loss_fn = nn.SmoothL1Loss(beta=1e-2)\n", + "\n", + "scheduler = OneCycleLR(\n", + " optimizer,\n", + " max_lr = max_lr_onecycle,\n", + " total_steps = total_steps,\n", + " pct_start = pct_start_onecycle,\n", + " anneal_strategy = 'cos'\n", + ")\n", + "\n", + "print(\"DEBUG: 
Model, optimizer, loss_fn, and scheduler are set up.\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "e135735a-6dc6-4094-bcbe-68d265175ede", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "DEBUG: Starting training loop...\n", + "Beginning training...\n", + "Epoch 1/50 — Start training\n", + "Epoch 001 — Train Loss: 1.1225, Test Loss: 0.4176 — MAE Lat: 5.66 cm, Lon: 5.35 cm (sum: 11.01 cm)\n", + " >> Best model updated.\n", + "Epoch 2/50 — Start training\n", + "Epoch 002 — Train Loss: 0.2913, Test Loss: 0.2280 — MAE Lat: 2.83 cm, Lon: 3.28 cm (sum: 6.12 cm)\n", + " >> Best model updated.\n", + "Epoch 3/50 — Start training\n", + "Epoch 003 — Train Loss: 0.1869, Test Loss: 0.1667 — MAE Lat: 1.96 cm, Lon: 2.57 cm (sum: 4.54 cm)\n", + " >> Best model updated.\n", + "Epoch 4/50 — Start training\n", + "Epoch 004 — Train Loss: 0.1520, Test Loss: 0.2174 — MAE Lat: 2.93 cm, Lon: 2.92 cm (sum: 5.85 cm)\n", + "Epoch 5/50 — Start training\n", + "Epoch 005 — Train Loss: 0.1394, Test Loss: 0.1045 — MAE Lat: 1.22 cm, Lon: 1.71 cm (sum: 2.93 cm)\n", + " >> Best model updated.\n", + "Epoch 6/50 — Start training\n", + "Epoch 006 — Train Loss: 0.1337, Test Loss: 0.1113 — MAE Lat: 1.53 cm, Lon: 1.58 cm (sum: 3.11 cm)\n", + "Epoch 7/50 — Start training\n", + "Epoch 007 — Train Loss: 0.1360, Test Loss: 0.0880 — MAE Lat: 1.06 cm, Lon: 1.44 cm (sum: 2.50 cm)\n", + " >> Best model updated.\n", + "Epoch 8/50 — Start training\n", + "Epoch 008 — Train Loss: 0.1314, Test Loss: 0.1729 — MAE Lat: 2.34 cm, Lon: 2.36 cm (sum: 4.70 cm)\n", + "Epoch 9/50 — Start training\n", + "Epoch 009 — Train Loss: 0.1262, Test Loss: 0.1355 — MAE Lat: 1.97 cm, Lon: 1.77 cm (sum: 3.74 cm)\n", + "Epoch 10/50 — Start training\n", + "Epoch 010 — Train Loss: 0.1355, Test Loss: 0.0853 — MAE Lat: 1.09 cm, Lon: 1.35 cm (sum: 2.43 cm)\n", + " >> Best model updated.\n", + "Epoch 11/50 — Start training\n", + "Epoch 011 — Train Loss: 0.1343, Test 
Loss: 0.3342 — MAE Lat: 5.26 cm, Lon: 3.62 cm (sum: 8.87 cm)\n", + "Epoch 12/50 — Start training\n", + "Epoch 012 — Train Loss: 0.1327, Test Loss: 0.0961 — MAE Lat: 1.32 cm, Lon: 1.40 cm (sum: 2.72 cm)\n", + "Epoch 13/50 — Start training\n", + "Epoch 013 — Train Loss: 0.1268, Test Loss: 0.0865 — MAE Lat: 1.56 cm, Lon: 0.91 cm (sum: 2.47 cm)\n", + "Epoch 14/50 — Start training\n", + "Epoch 014 — Train Loss: 0.1255, Test Loss: 0.1017 — MAE Lat: 1.09 cm, Lon: 1.76 cm (sum: 2.85 cm)\n", + "Epoch 15/50 — Start training\n", + "Epoch 015 — Train Loss: 0.1264, Test Loss: 0.1670 — MAE Lat: 2.15 cm, Lon: 2.39 cm (sum: 4.55 cm)\n", + "Epoch 16/50 — Start training\n", + "Epoch 016 — Train Loss: 0.1260, Test Loss: 0.1126 — MAE Lat: 1.37 cm, Lon: 1.78 cm (sum: 3.14 cm)\n", + "Epoch 17/50 — Start training\n", + "Epoch 017 — Train Loss: 0.1171, Test Loss: 0.1570 — MAE Lat: 2.43 cm, Lon: 1.87 cm (sum: 4.30 cm)\n", + "Epoch 18/50 — Start training\n", + "Epoch 018 — Train Loss: 0.1179, Test Loss: 0.1020 — MAE Lat: 1.59 cm, Lon: 1.29 cm (sum: 2.88 cm)\n", + "Epoch 19/50 — Start training\n", + "Epoch 019 — Train Loss: 0.1221, Test Loss: 0.1262 — MAE Lat: 1.66 cm, Lon: 1.83 cm (sum: 3.50 cm)\n", + "Epoch 20/50 — Start training\n", + "Epoch 020 — Train Loss: 0.1209, Test Loss: 0.1584 — MAE Lat: 1.88 cm, Lon: 2.44 cm (sum: 4.32 cm)\n", + "Epoch 21/50 — Start training\n", + "Epoch 021 — Train Loss: 0.1105, Test Loss: 0.1026 — MAE Lat: 1.73 cm, Lon: 1.16 cm (sum: 2.89 cm)\n", + "Epoch 22/50 — Start training\n", + "Epoch 022 — Train Loss: 0.1171, Test Loss: 0.0881 — MAE Lat: 1.26 cm, Lon: 1.25 cm (sum: 2.51 cm)\n", + "Epoch 23/50 — Start training\n", + "Epoch 023 — Train Loss: 0.1122, Test Loss: 0.1082 — MAE Lat: 0.99 cm, Lon: 2.03 cm (sum: 3.02 cm)\n", + "Epoch 24/50 — Start training\n", + "Epoch 024 — Train Loss: 0.1117, Test Loss: 0.1029 — MAE Lat: 1.32 cm, Lon: 1.57 cm (sum: 2.89 cm)\n", + "Epoch 25/50 — Start training\n", + "Epoch 025 — Train Loss: 0.1139, Test Loss: 0.1266 — MAE Lat: 
1.42 cm, Lon: 2.08 cm (sum: 3.50 cm)\n", + "Epoch 26/50 — Start training\n", + "Epoch 026 — Train Loss: 0.1070, Test Loss: 0.0862 — MAE Lat: 1.48 cm, Lon: 0.99 cm (sum: 2.47 cm)\n", + "Epoch 27/50 — Start training\n", + "Epoch 027 — Train Loss: 0.1007, Test Loss: 0.0955 — MAE Lat: 1.65 cm, Lon: 1.06 cm (sum: 2.71 cm)\n", + "Epoch 28/50 — Start training\n", + "Epoch 028 — Train Loss: 0.1043, Test Loss: 0.0718 — MAE Lat: 1.04 cm, Lon: 1.05 cm (sum: 2.09 cm)\n", + " >> Best model updated.\n", + "Epoch 29/50 — Start training\n", + "Epoch 029 — Train Loss: 0.1021, Test Loss: 0.0926 — MAE Lat: 1.41 cm, Lon: 1.22 cm (sum: 2.63 cm)\n", + "Epoch 30/50 — Start training\n", + "Epoch 030 — Train Loss: 0.1023, Test Loss: 0.0922 — MAE Lat: 1.21 cm, Lon: 1.40 cm (sum: 2.61 cm)\n", + "Epoch 31/50 — Start training\n", + "Epoch 031 — Train Loss: 0.0976, Test Loss: 0.0894 — MAE Lat: 1.19 cm, Lon: 1.35 cm (sum: 2.55 cm)\n", + "Epoch 32/50 — Start training\n", + "Epoch 032 — Train Loss: 0.0917, Test Loss: 0.0782 — MAE Lat: 1.20 cm, Lon: 1.05 cm (sum: 2.25 cm)\n", + "Epoch 33/50 — Start training\n", + "Epoch 033 — Train Loss: 0.1006, Test Loss: 0.0856 — MAE Lat: 1.13 cm, Lon: 1.32 cm (sum: 2.45 cm)\n", + "Epoch 34/50 — Start training\n", + "Epoch 034 — Train Loss: 0.0924, Test Loss: 0.0817 — MAE Lat: 1.16 cm, Lon: 1.19 cm (sum: 2.35 cm)\n", + "Epoch 35/50 — Start training\n", + "Epoch 035 — Train Loss: 0.0906, Test Loss: 0.0965 — MAE Lat: 1.55 cm, Lon: 1.17 cm (sum: 2.73 cm)\n", + "Epoch 36/50 — Start training\n", + "Epoch 036 — Train Loss: 0.0892, Test Loss: 0.0896 — MAE Lat: 1.54 cm, Lon: 1.02 cm (sum: 2.56 cm)\n", + "Epoch 37/50 — Start training\n", + "Epoch 037 — Train Loss: 0.0877, Test Loss: 0.0786 — MAE Lat: 1.28 cm, Lon: 0.99 cm (sum: 2.27 cm)\n", + "Epoch 38/50 — Start training\n", + "Epoch 038 — Train Loss: 0.0875, Test Loss: 0.0683 — MAE Lat: 0.99 cm, Lon: 1.01 cm (sum: 2.00 cm)\n", + " >> Best model updated.\n", + "Epoch 39/50 — Start training\n", + "Epoch 039 — Train Loss: 
0.0867, Test Loss: 0.0838 — MAE Lat: 1.03 cm, Lon: 1.37 cm (sum: 2.40 cm)\n", + "Epoch 40/50 — Start training\n", + "Epoch 040 — Train Loss: 0.0851, Test Loss: 0.0697 — MAE Lat: 0.98 cm, Lon: 1.05 cm (sum: 2.03 cm)\n", + "Epoch 41/50 — Start training\n", + "Epoch 041 — Train Loss: 0.0840, Test Loss: 0.0643 — MAE Lat: 0.88 cm, Lon: 1.01 cm (sum: 1.89 cm)\n", + " >> Best model updated.\n", + "Epoch 42/50 — Start training\n", + "Epoch 042 — Train Loss: 0.0843, Test Loss: 0.0628 — MAE Lat: 0.87 cm, Lon: 0.99 cm (sum: 1.85 cm)\n", + " >> Best model updated.\n", + "Epoch 43/50 — Start training\n", + "Epoch 043 — Train Loss: 0.0808, Test Loss: 0.0650 — MAE Lat: 0.93 cm, Lon: 0.97 cm (sum: 1.91 cm)\n", + "Epoch 44/50 — Start training\n", + "Epoch 044 — Train Loss: 0.0797, Test Loss: 0.0642 — MAE Lat: 0.92 cm, Lon: 0.97 cm (sum: 1.89 cm)\n", + "Epoch 45/50 — Start training\n", + "Epoch 045 — Train Loss: 0.0795, Test Loss: 0.0630 — MAE Lat: 0.91 cm, Lon: 0.95 cm (sum: 1.86 cm)\n", + "Epoch 46/50 — Start training\n", + "Epoch 046 — Train Loss: 0.0788, Test Loss: 0.0626 — MAE Lat: 0.91 cm, Lon: 0.94 cm (sum: 1.85 cm)\n", + " >> Best model updated.\n", + "Epoch 47/50 — Start training\n", + "Epoch 047 — Train Loss: 0.0796, Test Loss: 0.0622 — MAE Lat: 0.90 cm, Lon: 0.94 cm (sum: 1.84 cm)\n", + " >> Best model updated.\n", + "Epoch 48/50 — Start training\n", + "Epoch 048 — Train Loss: 0.0791, Test Loss: 0.0615 — MAE Lat: 0.88 cm, Lon: 0.93 cm (sum: 1.82 cm)\n", + " >> Best model updated.\n", + "Epoch 49/50 — Start training\n", + "Epoch 049 — Train Loss: 0.0777, Test Loss: 0.0619 — MAE Lat: 0.89 cm, Lon: 0.94 cm (sum: 1.83 cm)\n", + "Epoch 50/50 — Start training\n", + "Epoch 050 — Train Loss: 0.0783, Test Loss: 0.0620 — MAE Lat: 0.90 cm, Lon: 0.94 cm (sum: 1.83 cm)\n", + "\n", + "--- Best performance on test set ---\n", + "Epoch: 48\n", + "Test Loss: 0.0615\n", + "MAE Lat: 0.88 cm, Lon: 0.93 cm (sum: 1.82 cm)\n", + "Best model saved as 'best_full_model.pth'\n" + ] + } + ], + 
"source": [ + "# 12) Training & evaluation loop\n", + "print(\"DEBUG: Starting training loop...\")\n", + "train_losses_epochs, test_losses_epochs, mae_sums_epochs = [], [], []\n", + "best_test_loss = float('inf')\n", + "best_epoch_metrics = {}\n", + "best_model_state = None\n", + "\n", + "print(\"Beginning training...\")\n", + "for epoch in range(1, n_epochs + 1):\n", + " print(f\"Epoch {epoch}/{n_epochs} — Start training\")\n", + " model.train()\n", + " total_train_loss_epoch = 0.0\n", + "\n", + " if train_loader is None or len(train_loader.dataset) == 0:\n", + " print(f\"Epoch {epoch:03d} — Training skipped (empty train_loader).\")\n", + " train_losses_epochs.append(float('nan'))\n", + " test_losses_epochs.append(float('nan'))\n", + " mae_sums_epochs.append(float('nan'))\n", + " continue\n", + "\n", + " # trening\n", + " for window_snapshots in train_loader:\n", + " optimizer.zero_grad()\n", + " pred_norm, true_norm = run_window(window_snapshots, model, hidden_dim, device)\n", + " lat_p, lon_p = pred_norm[:, 0], pred_norm[:, 1]\n", + " lat_t, lon_t = true_norm[:, 0], true_norm[:, 1]\n", + "\n", + " loss_lat = loss_fn(lat_p.unsqueeze(1), lat_t.unsqueeze(1))\n", + " loss_lon = loss_fn(lon_p.unsqueeze(1), lon_t.unsqueeze(1))\n", + " loss = loss_lat + loss_lon\n", + "\n", + " loss.backward()\n", + " torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)\n", + " optimizer.step()\n", + " scheduler.step()\n", + "\n", + " total_train_loss_epoch += loss.item()\n", + "\n", + " avg_train_loss_epoch = total_train_loss_epoch / len(train_loader)\n", + " train_losses_epochs.append(avg_train_loss_epoch)\n", + "\n", + " # evaluacija\n", + " if test_loader is not None and len(test_loader.dataset) > 0:\n", + " model.eval()\n", + " total_test_loss_epoch = 0.0\n", + " abs_errs_epoch = []\n", + "\n", + " with torch.no_grad():\n", + " for window_snapshots_test in test_loader:\n", + " pred_norm_test, true_norm_test = run_window(window_snapshots_test, model, hidden_dim, 
device)\n", + " lat_p_t, lon_p_t = pred_norm_test[:, 0], pred_norm_test[:, 1]\n", + " lat_t_t, lon_t_t = true_norm_test[:, 0], true_norm_test[:, 1]\n", + "\n", + " lt = loss_fn(lat_p_t.unsqueeze(1), lat_t_t.unsqueeze(1))\n", + " ln = loss_fn(lon_p_t.unsqueeze(1), lon_t_t.unsqueeze(1))\n", + " total_test_loss_epoch += (lt + ln).item()\n", + "\n", + " # un-normalize za MAE\n", + " pred_cm = pred_norm_test.cpu().numpy() * targ_scaler.scale_ + targ_scaler.mean_\n", + " true_cm = true_norm_test.cpu().numpy() * targ_scaler.scale_ + targ_scaler.mean_\n", + " abs_errs_epoch.append(np.abs(pred_cm - true_cm))\n", + "\n", + " avg_test_loss_epoch = total_test_loss_epoch / len(test_loader)\n", + " test_losses_epochs.append(avg_test_loss_epoch)\n", + "\n", + " if abs_errs_epoch:\n", + " errors_epoch = np.vstack(abs_errs_epoch)\n", + " mae_lat_epoch = errors_epoch[:, 0].mean()\n", + " mae_lon_epoch = errors_epoch[:, 1].mean()\n", + " mae_sum_epoch = mae_lat_epoch + mae_lon_epoch\n", + " mae_sums_epochs.append(mae_sum_epoch)\n", + "\n", + " print(\n", + " f\"Epoch {epoch:03d} — \"\n", + " f\"Train Loss: {avg_train_loss_epoch:.4f}, \"\n", + " f\"Test Loss: {avg_test_loss_epoch:.4f} — \"\n", + " f\"MAE Lat: {mae_lat_epoch:.2f} cm, \"\n", + " f\"Lon: {mae_lon_epoch:.2f} cm \"\n", + " f\"(sum: {mae_sum_epoch:.2f} cm)\"\n", + " )\n", + "\n", + " if avg_test_loss_epoch < best_test_loss:\n", + " best_test_loss = avg_test_loss_epoch\n", + " best_epoch_metrics = {\n", + " 'epoch': epoch,\n", + " 'test_loss': avg_test_loss_epoch,\n", + " 'mae_lat': mae_lat_epoch,\n", + " 'mae_lon': mae_lon_epoch,\n", + " 'mae_sum': mae_sum_epoch\n", + " }\n", + " best_model_state = copy.deepcopy(model.state_dict())\n", + " torch.save(best_model_state, 'best_full_model.pth')\n", + " print(f\" >> Best model updated.\")\n", + " else:\n", + " mae_sums_epochs.append(float('nan'))\n", + " print(\n", + " f\"Epoch {epoch:03d} — \"\n", + " f\"Train Loss: {avg_train_loss_epoch:.4f}, \"\n", + " f\"Test Loss: 
{avg_test_loss_epoch:.4f} (No MAE)\"\n", + " )\n", + " else:\n", + " test_losses_epochs.append(float('nan'))\n", + " mae_sums_epochs.append(float('nan'))\n", + " print(f\"Epoch {epoch:03d} — Train Loss: {avg_train_loss_epoch:.4f} (Test skipped)\")\n", + "\n", + "if best_epoch_metrics:\n", + " print(\"\\n--- Best performance on test set ---\")\n", + " print(f\"Epoch: {best_epoch_metrics['epoch']}\")\n", + " print(f\"Test Loss: {best_epoch_metrics['test_loss']:.4f}\")\n", + " print(\n", + " f\"MAE Lat: {best_epoch_metrics['mae_lat']:.2f} cm, \"\n", + " f\"Lon: {best_epoch_metrics['mae_lon']:.2f} cm \"\n", + " f\"(sum: {best_epoch_metrics['mae_sum']:.2f} cm)\"\n", + " )\n", + " print(\"Best model saved as 'best_full_model.pth'\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "e14e40aa-928c-4067-98fa-73533a08bc54", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAArMAAAGHCAYAAACj5No9AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy81sbWrAAAACXBIWXMAAA9hAAAPYQGoP6dpAAB8LUlEQVR4nO3dd3yTVfvH8c+dNN0to4wWKUNAhgwZgqAs2Si48VEQUFABF+LC8ciQR5SfA30UHgdDRBEHIioidTAUkD0EREWWUChDaCl0Jffvj5BA6EratGnL9/169dXmzsmdk5wErpxc5zqGaZomIiIiIiKlkCXQHRARERERKSgFsyIiIiJSaimYFREREZFSS8GsiIiIiJRaCmZFREREpNRSMCsiIiIipZaCWREREREptRTMioiIiEippWBWREREREotBbMixcAwDK9+lixZUqj7GTt2LIZhFOi2S5Ys8UsfSoPXXnsNwzBYtGhRrm3eeecdDMNg3rx5Xp+3U6dOdOrUyeOYYRiMHTs239vOnDkTwzDYvXu31/fnsnDhwlzvo1atWgwePNjncxaW6/X06aefFvt9+8L1njly5EiR3s/gwYPzfO8Hmuv1t3bt2kB3RcRnQYHugMiFYOXKlR6Xn3vuOX788Ud++OEHj+ONGjUq1P0MHTqUnj17Fui2LVq0YOXKlYXuQ2kwYMAAnnjiCaZPn57r8zVjxgwqV65Mnz59CnVfK1eupHr16oU6R34WLlzIm2++mWNA+/nnnxMdHV2k9y/eCQsLy/aeF5HCUzArUgyuuOIKj8uVK1fGYrFkO36+U6dOER4e7vX9VK9evcCBU3R0dL79KStiYmK47rrrmD9/PkePHiUmJsbj+t9++42VK1fyyCOPYLPZCnVfgX5OmzdvHtD7l7O8ec+LiO+UZiBSQnTq1InGjRuzbNky2rVrR3h4OHfddRcAc+fOpXv37sTFxREWFkbDhg0ZPXo0qampHufIKc2gVq1aXHvttSxatIgWLVoQFhZGgwYNmD59uke7nNIMBg8eTGRkJH/++Se9e/cmMjKS+Ph4HnnkEdLT0z1u//fff3PzzTcTFR
VF+fLl6d+/P2vWrMEwDGbOnJnr4960aROGYTBt2rRs133zzTcYhsGCBQsAOHz4MPfccw/x8fGEhIRQuXJlrrzySr777rt8n9/zDRkyhIyMDD788MNs182YMQPA/fyPGzeONm3aULFiRaKjo2nRogXTpk3DNM187yenNINVq1Zx5ZVXEhoaSrVq1XjyySfJzMzMdltvxn3w4MG8+eab7vty/bjSFXJKM9i7dy8DBgygSpUqhISE0LBhQ15++WUcDoe7ze7duzEMg5deeolXXnmF2rVrExkZSdu2bVm1alW+j9tbv/76K9dddx0VKlQgNDSUyy67jPfee8+jjcPhYMKECdSvX5+wsDDKly9P06ZNee2119xtCvva2LdvHzfeeCPR0dGUK1eOAQMGcPjwYff1Q4YMoWLFipw6dSrbba+++mouvfTSAj4Dnlzvw9mzZzNq1ChiY2MJCwujY8eObNiwIVv7BQsW0LZtW8LDw4mKiqJbt27ZvgkC5we02267japVqxISEkKNGjUYOHBgtvdxSkoKw4cPp1KlSsTExHDjjTdy4MABvzw2kaKimVmREiQxMZEBAwbw+OOP8/zzz2OxOD9v/vHHH/Tu3ZuRI0cSERHBb7/9xosvvsjq1au9+tpy06ZNPPLII4wePZqqVavy7rvvMmTIEOrWrUuHDh3yvG1mZiZ9+/ZlyJAhPPLIIyxbtoznnnuOcuXK8eyzzwKQmppK586dOXbsGC+++CJ169Zl0aJF3Hrrrfn2rVmzZjRv3pwZM2YwZMgQj+tmzpxJlSpV6N27NwB33HEH69ev5z//+Q+XXHIJx48fZ/369Rw9ejTf+zlf165dqVmzJtOnT+eBBx5wH7fb7bz//vtcccUV7pSL3bt3c++991KjRg3AGYw+8MAD7N+/3/0ceGvbtm106dKFWrVqMXPmTMLDw5kyZUqOQbU34/7vf/+b1NRUPv30U48gJi4uLsf7P3z4MO3atSMjI4PnnnuOWrVq8dVXX/Hoo4+yc+dOpkyZ4tH+zTffpEGDBkyePNl9f71792bXrl2UK1fOp8d+vh07dtCuXTuqVKnC66+/TkxMDLNnz2bw4MEcOnSIxx9/HIBJkyYxduxYnnnmGTp06EBmZia//fYbx48fd5+rsK+NG264gX79+jFs2DC2bt3Kv//9b7Zt28Yvv/yCzWbjoYceYvr06Xz44YcMHTrUfbtt27bx448/uj9Q5CcrKyvbMYvF4n6vuzz11FO0aNGCd999lxMnTjB27Fg6derEhg0buPjiiwH48MMP6d+/P927d2fOnDmkp6czadIkOnXqxPfff89VV10FON//V111FZUqVWL8+PHUq1ePxMREFixYQEZGBiEhIe77HTp0KNdccw0ffvgh+/bt47HHHmPAgAFKj5CSzRSRYjdo0CAzIiLC41jHjh1NwPz+++/zvK3D4TAzMzPNpUuXmoC5adMm93Vjxowxz39b16xZ0wwNDTX37NnjPnb69GmzYsWK5r333us+9uOPP5qA+eOPP3r0EzA//vhjj3P27t3brF+/vvvym2++aQLmN99849Hu3nvvNQFzxowZeT6m119/3QTMHTt2uI8dO3bMDAkJMR955BH3scjISHPkyJF5nssXrudr/fr17mNffvmlCZjvvPNOjrex2+1mZmamOX78eDMmJsZ0OBzu6zp27Gh27NjRoz1gjhkzxn351ltvNcPCwsyDBw+6j2VlZZkNGjQwAXPXrl053m9e437fffdlG3eXmjVrmoMGDXJfHj16tAmYv/zyi0e74cOHm4ZhuMdg165dJmA2adLEzMrKcrdbvXq1CZhz5szJ8f5cXK+nTz75JNc2//rXv8yQkBBz7969Hsd79eplhoeHm8ePHzdN0zSvvfZa87LLLsvz/gr62nC9Bh5++GGP4x988IEJmLNnz3Yf69ixY7Z+DB8+3IyOjjZTUlLyvB/Xeymnny5durjbuZ63Fi1aeLy2du/ebdpsNnPo0K
GmaTpfh9WqVTObNGli2u12d7uUlBSzSpUqZrt27dzHrr76arN8+fJmUlJSrv2bMWOGCZgjRozwOD5p0iQTMBMTE/N8fCKBpDQDkRKkQoUKXH311dmO//XXX9x+++3ExsZitVqx2Wx07NgRgO3bt+d73ssuu8w9qwgQGhrKJZdcwp49e/K9rWEY2RZBNW3a1OO2S5cuJSoqKttiqttuuy3f8wP079+fkJAQj3QE10zTnXfe6T7WunVrZs6cyYQJE1i1alWOX8374s4778RisXikXMyYMYOIiAiPWeUffviBrl27Uq5cOffz/+yzz3L06FGSkpJ8us8ff/yRLl26ULVqVfcxq9Wa4yx2Ycc9Jz/88AONGjWidevWHscHDx6MaZrZZuCuueYarFar+3LTpk0BvHrteNOXLl26EB8fn60vp06dcs80t27dmk2bNjFixAi+/fZbkpOTs52rsK+N/v37e1zu168fQUFB/Pjjj+5jDz30EBs3buTnn38GIDk5mffff59BgwYRGRmZ732EhYWxZs2abD/nz4YD3H777R4pQzVr1qRdu3bu/uzYsYMDBw5wxx13eMzqRkZGctNNN7Fq1SpOnTrFqVOnWLp0Kf369aNy5cr59rFv374el/053iJFRcGsSAmS01fDJ0+epH379vzyyy9MmDCBJUuWsGbNGnfJqNOnT+d73vMXOAGEhIR4ddvw8HBCQ0Oz3TYtLc19+ejRox7BmUtOx3JSsWJF+vbty6xZs7Db7YAzxaB169YeuYhz585l0KBBvPvuu7Rt25aKFSsycOBADh486NX9nK9mzZp06dKFDz/8kPT0dI4cOcJXX33FLbfcQlRUFACrV6+me/fugLNc188//8yaNWt4+umnAe+e/3MdPXqU2NjYbMfPP+aPcc/t/nN6nVWrVs19/bnOf+24vpIu6P0XpC9PPvkkL730EqtWraJXr17ExMTQpUsXjzJShX1tnP/8BwUFERMT4/F8XHfdddSqVcudUjBz5kxSU1O57777vLoPi8VCq1atsv1ccskl+fbHdczVH9fv3J4/h8PBP//8wz///IPdbvd6YWhRjrdIUVEwK1KC5FRv8ocffuDAgQNMnz6doUOH0qFDB1q1auUOtkqCmJgYDh06lO24L0HmnXfeyf79+0lISGDbtm2sWbPGY1YWoFKlSkyePJndu3ezZ88eJk6cyLx58wpVR3XIkCEcO3aML774gtmzZ5ORkeGRu/vRRx9hs9n46quv6NevH+3ataNVq1YFvr+YmJgcn5fzjxXVuMfExJCYmJjtuGuRT6VKlQp1/qLoS1BQEKNGjWL9+vUcO3aMOXPmsG/fPnr06OFekFXY18b5z39WVla2ShcWi4X77ruPTz/9lMTERKZMmUKXLl2oX79+QR6+T/1xHXP1x/U7t+fPYrFQoUIFKlasiNVq5e+///Z7H0VKCgWzIiWcK8A9d5EGwFtvvRWI7uSoY8eOpKSk8M0333gc/+ijj7w+R/fu3bnooouYMWMGM2bMIDQ0NM80hRo1anD//ffTrVs31q9fX+C+X3/99cTExDB9+nRmzJjBJZdc4l44A87nPygoyOOr9tOnT/P+++8X6P46d+7M999/7xH82+125s6d69HOl3H3ZfasS5cubNu2LdtzNmvWLAzDoHPnzt49ED/o0qWLO2g/vy/h4eE5lrEqX748N998M/fddx/Hjh3LcZOJgrw2PvjgA4/LH3/8MVlZWdk2wRg6dCjBwcH079+fHTt2cP/993t1fl/NmTPHo1rGnj17WLFihbs/9evX56KLLuLDDz/0aJeamspnn33mrnDgqoTwySefFPnGECKBomoGIiVcu3btqFChAsOGDWPMmDHYbDY++OADNm3aFOiuuQ0aNIhXX32VAQMGMGHCBOrWrcs333zDt99+C5BtpXZOrFYrAwcO5JVXXiE6Opobb7zRY7X8iRMn6Ny5M7fffjsNGjQgKiqKNWvWsGjRIm688UZ3u/
HjxzN+/Hi+//57d35pXkJCQujfvz///e9/MU2TF154weP6a665hldeeYXbb7+de+65h6NHj/LSSy9lCzK99cwzz7BgwQKuvvpqnn32WcLDw3nzzTezlVnzZdybNGkCwIsvvkivXr2wWq00bdqU4ODgbG0ffvhhZs2axTXXXMP48eOpWbMmX3/9NVOmTGH48OE5fuVdGLmV8erYsSNjxozhq6++onPnzjz77LNUrFiRDz74gK+//ppJkya5x79Pnz40btyYVq1aUblyZfbs2cPkyZOpWbMm9erV8/q1kZd58+YRFBREt27d3NUMmjVrRr9+/TzalS9fnoEDBzJ16lRq1qzp06YaDocj1+ejefPmHq+ppKQkbrjhBu6++25OnDjBmDFjCA0N5cknnwSc76lJkybRv39/rr32Wu69917S09P5v//7P44fP+7xOn7llVe46qqraNOmDaNHj6Zu3bocOnSIBQsW8NZbb5Wob3lECiSw689ELky5VTO49NJLc2y/YsUKs23btmZ4eLhZuXJlc+jQoeb69euzVQrIrZrBNddck+2c56+8z62awfn9zO1+9u7da954441mZGSkGRUVZd50003mwoULTcD84osvcnsqPPz+++/uFd4JCQke16WlpZnDhg0zmzZtakZHR5thYWFm/fr1zTFjxpipqanZ+nbu48jPpk2bTMC0Wq3mgQMHsl0/ffp0s379+mZISIh58cUXmxMnTjSnTZuWrfqAN9UMTNM0f/75Z/OKK64wQ0JCzNjYWPOxxx4z33777Wzn83bc09PTzaFDh5qVK1c2DcPwOM/51QxM0zT37Nlj3n777WZMTIxps9nM+vXrm//3f//nsSreVc3g//7v/7I9Hzk9pvO5Xk+5/bjGZ8uWLWafPn3McuXKmcHBwWazZs2yVb94+eWXzXbt2pmVKlUyg4ODzRo1aphDhgwxd+/ebZqm96+NnLheL+vWrTP79Onjfv3edttt5qFDh3K8zZIlS0zAfOGFF/I897nyqmYAmH/88YfH8/b++++bDz74oFm5cmUzJCTEbN++vbl27dps550/f77Zpk0bMzQ01IyIiDC7dOli/vzzz9nabdu2zbzlllvMmJgY93M4ePBgMy0tzTTNs9UM1qxZ43G7nP5dEClpDNP0ouq3iEgBPP/88zzzzDPs3bu3yLd0FSkujzzyCFOnTmXfvn05Lq4sjCVLltC5c2c++eQTbr75Zr+eW6SsUpqBiPjFG2+8AUCDBg3IzMzkhx9+4PXXX2fAgAEKZKVMWLVqFb///jtTpkzh3nvv9XsgKyIFo2BWRPwiPDycV199ld27d5Oenk6NGjV44okneOaZZwLdNRG/cC2quvbaa5kwYUKguyMiZyjNQERERERKLZXmEhEREZFSS8GsiIiIiJRaCmZFREREpNS64BaAORwODhw4QFRUVI5bh4qIiIhIYJmmSUpKCtWqVct3450LLpg9cOAA8fHxge6GiIiIiORj3759+ZZ3vOCCWde2ffv27SM6OrpA58jMzGTx4sV0794dm83mz+5JMdNYlh0ay7JDY1l2aCzLjuIey+TkZOLj473abvmCC2ZdqQXR0dGFCmbDw8OJjo7Wm7OU01iWHRrLskNjWXZoLMuOQI2lNymhWgAmIiIiIqWWglkRERERKbUUzIqIiIhIqXXB5cyKiIhI6WSaJllZWdjt9kB35YKTmZlJUFAQaWlpfnv+bTYbVqu10OdRMCsiIiIlXkZGBomJiZw6dSrQXbkgmaZJbGws+/bt81udfsMwqF69OpGRkYU6j4JZERERKdEcDge7du3CarVSrVo1goODtfFRMXM4HJw8eZLIyMh8NzHwhmmaHD58mL///pt69eoVaoZWwWwRsztMVu86RlJKGlWiQmlduyJWi96AIiIi3srIyMDhcBAfH094eHigu3NBcjgcZGRkEBoa6pdgFqBy5crs3r2bzMxMBbMl1aJfExn35TYST6S5j8WVC2VMn0
b0bBwXwJ6JiIiUPv4KoqRk8Nfsul4VRWTRr4kMn73eI5AFOHgijeGz17Po18QA9UxERESk7FAwWwTsDpNxX27DzOE617FxX27D7siphYiIiIh4S8FsEVi961i2GdlzmUDiiTRW7zpWfJ0SERER7A6TlTuP8sXG/azcebRUTix16tSJkSNHBrobJYZyZotAUkrugWxB2omIiEjhFfdalvxyQgcNGsTMmTN9Pu+8efOw2WwF7JXT4MGDOX78OPPnzy/UeUoCBbNFoEpUqF/biYiISOG41rKcPw/rWssydUALvwe0iYln18fMnTuXZ599lh07driPhYWFebTPzMz0KkitWLGi/zpZBijNoAi0rl2RuHKh5PZ5zMD5SbB1bb0YRURECsI0TU5lZHn1k5KWyZgFW/NcyzJ2wTZS0jK9Op9pepeaEBsb6/4pV64chmG4L6elpVG+fHk+/vhjOnXqRGhoKLNnz+bo0aPcdtttVK9enfDwcJo0acKcOXM8znt+mkGtWrV4/vnnueuuu4iKiqJGjRq8/fbbBXtiz1i6dCmtW7cmJCSEuLg4nnzySbKystzXf/rppzRp0oSwsDBiYmLo2rUrqampACxZsoTWrVsTERFB+fLlufLKK9mzZ0+h+pMXzcwWAavFYEyfRgyfvT7bda4Ad0yfRqo3KyIiUkCnM+00evZbv5zLBA4mp9Fk7GKv2m8b34PwYP+EUE888QQvv/wyM2bMICQkhLS0NFq2bMkTTzxBdHQ0X3/9NXfccQcXX3wxbdq0yfU8L7/8Ms899xxPPfUUn376KcOHD6dDhw40aNDA5z7t37+f3r17M3jwYGbNmsVvv/3G3XffjWEYPP/88yQmJnLbbbcxadIkbrjhBlJSUli+fLl7u+Hrr7+eu+++mzlz5pCRkcHq1auLdJMLBbNFpGfjOKYOaMHTn//K0dQM9/FY1ZkVERGRM0aOHMmNN97ocezRRx91//3AAw+waNEiPvnkkzyD2d69ezNixAjAGSC/+uqrLFmypEDB7JQpU4iPj+eNN97AMAwaNGjA/v37GT16NBMmTCAxMZGsrCxuvPFGatasCUCTJk0AOHbsGCdOnODaa6+lTp06ADRs2NDnPvhCwWwR6tk4jtqVIugxeTlhNivTB1+uHcBERET8IMxmZdv4Hl61Xb3rGINnrMm33cw7L/cqBTDMVvDdqs7XqlUrj8t2u50XXniBuXPnsn//ftLT00lPTyciIiLP8zRt2tT9tyudISkpqUB92r59O23btvWYTW3Xrh0nT57k77//plmzZnTp0oUmTZrQo0cPunfvzs0330yFChWoWLEigwcPpkePHnTr1o2uXbvSr18/4uKKbhJPObNFLDrMmcidaXdwxcUKZEVERPzBMAzCg4O8+mlfr7JXa1na16vs1fn8+ZX5+UHqyy+/zKuvvsrjjz/ODz/8wMaNG+nRowcZGRm5nMHp/IVjhmHgcDgK1CfTNLM9RleesGEYWK1WEhIS+Oabb2jUqBH//e9/qV+/Prt27QJgxowZrFy5knbt2jF37lwuueQSVq1aVaC+eEPBbBGLDHFOfmc5TNKzCvaiEhERkYJzrWUBsgW0JW0ty/Lly7nuuusYMGAAzZo14+KLL+aPP/4o1j40atSIFStWeCx0W7lyJVFRUVx00UWAM6i98sorGTduHBs2bCA4OJjPP//c3b558+Y8+eSTrFixgsaNG/Phhx8WWX8VzBaxiHMSxE+mZ+XRUkRERIqKay1LbDnPspix5UKLpCxXQdWtW5eEhARWrFjB9u3buffeezl48GCR3NeJEyfYuHGjx8/evXsZMWIE+/bt44EHHuC3337jiy++YOzYsYwYMQKLxcIvv/zC888/z9q1a9m7dy/z5s3j8OHDNGzYkF27dvHkk0+ycuVK9uzZw+LFi/n999+LNG9WObNFzGIxiAi2kpph52RaFpUiQwLdJRERkQtSz8ZxdGsUy+pdx0hKSaNKVGiJW8vy73
//m127dtGjRw/Cw8O55557uP766zlx4oTf72vJkiU0b97c45hrI4eFCxfy2GOP0axZMypWrMhdd93lXpgWHR3NsmXLmDx5MsnJydSsWZOXX36ZXr16cejQIX777Tfee+89jh49SlxcHPfffz/33nuv3/vvomC2GESEBDmDWc3MioiIBJTVYtC2Tkyx3+/gwYMZPHiw+3KtWrVyrFdbsWLFfHflWrJkicfl3bt3Z2uzcePGPM8xc+bMPHcf69ixI6tXr3ZfdjgcJCcnA87qBIsWLcrxdlWrVvVINygOSjMoBpGhzs8MCmZFRERE/EvBbDFwLQJLVTArIiIi4lcKZouBaxGYZmZFRERE/EvBbDFQmoGIiIhI0VAwWwyUZiAiIiJSNBTMFgNXMHsyTcGsiIiIiD8pmC0GEa5gNt0e4J6IiIiIlC0KZotBlDtnNjPAPREREREpWxTMFoOIYCsAqZqZFREREfEr7QBWDM6mGShnVkREJKAcdtizAk4egsiqULMdWKyB7pUUgmZmi0GUSnOJiIgE3rYFMLkxvHctfDbE+XtyY+fxImAYRp4/525v66tatWoxefJkv7UrzTQzWwwiVJpLREQksLYtgI8HAqbn8eRE5/F+s6BRX7/eZWJiovvvuXPn8uyzz7Jjxw73sbCwML/e34VKM7PFwFWaK0WluURERPzDNCEj1buftGT45nGyBbLOEzl/LXrC2c6b85k5nSe72NhY90+5cuUwDMPj2LJly2jZsiWhoaFcfPHFjBs3jqyss7HC2LFjqVGjBiEhIVSrVo0HH3wQgE6dOrFnzx4efvhh9yxvQU2dOpU6deoQHBxM/fr1ef/99z2ud/UhLCyMhg0b8tBDD7mvmzJlCvXq1SM0NJSqVaty8803F7gfhaGZ2WLg3jQhQ8GsiIiIX2Seguer+elkJiQfgBfivWv+1AEIjijUPX777bcMGDCA119/nfbt27Nz507uueceAMaMGcOnn37Kq6++ykcffcSll17KwYMH2bRpEwDz5s2jWbNm3HPPPdx9990F7sPnn3/OQw89xOTJk+natStfffUVd955J9WrV6dz584efWjYsCE7d+7kzz//BGDt2rU8+OCDvP/++7Rr145jx46xfPnyQj0nBaVgthi4trNNTc/CNM1CfYISERGR0u8///kPo0ePZtCgQQBcfPHFPPfcczz++OOMGTOGvXv3EhsbS9euXbHZbNSoUYPWrVsDULFiRaxWK1FRUcTGxha4Dy+99BKDBw9mxIgRAIwaNYpVq1bx0ksv0blzZ48+WK1WypcvT+fOnQHYu3cvERERXHvttURFRVGzZk2aN29eyGelYBTMFgNXzmym3SQ9y0GoTasmRURECsUW7pwh9caeFfCBF1+B9//UWd3Am/supHXr1rFmzRr+85//uI/Z7XbS0tI4deoUt9xyC5MnT+biiy+mZ8+e9O7dmz59+hAU5L/Qbfv27e7ZYJcrr7yS1157DcCjDz169KBTp07069eP4OBgunXrRs2aNd3969mzJzfccAPh4YV/bnylnNliEBF89oWnRWAiIiJ+YBjOr/q9+alzNURXA3L7ZtSA6Iuc7bw5nx++YXU4HIwbN46NGze6f7Zs2cIff/xBaGgo8fHx7NixgzfffJOwsDBGjBhBhw4dyMz07wZM539bfO43yOf34dFHH6VTp05kZmYSFRXF+vXrmTNnDnFxcTz77LM0a9aM48eP+7V/3ghoMLts2TL69OlDtWrVMAyD+fPn53ubpUuXeiRL/+9//yv6jhaS1WIQfmbjBJXnEhERKWYWK/R88cyF8wPRM5d7vlCs9WZbtGjBjh07qFu3brYfi8UZnoWFhdG3b19ef/11lixZwsqVK9myZQsAwcHB2O2F24ypYcOG/PTTTx7HVqxYQcOGDd2XXX147bXX+PLLLz36EBQURNeuXZk0aRKbN29m9+7d/PDDD4XqU0EENM0gNTWVZs2aceedd3LTTTfl237Xrl
307t2bu+++m9mzZ/Pzzz8zYsQIKleu7NXtAykiJIhTGXYFsyIiIoHQqK+z/NaiJ5yLvVyiqzkDWT+X5crPs88+y7XXXkt8fDy33HILFouFzZs3s2XLFiZMmMDMmTOx2+20adOG8PBw3n//fcLCwqhZsybgrB+7bNky/vWvfxESEkKlSpVyva/9+/ezceNGj2M1atTgscceo1+/frRo0YIuXbrw5ZdfMm/ePL777jsAjz6EhoYyd+5cdx+++uor/vrrLzp06ECFChVYuHAhDoeD+vXrF9lzlpuABrO9evWiV69eXrf/3//+R40aNdzFfxs2bMjatWt56aWXSnwwGxUSxOGUdE6qPJeIiEhgNOoLDa4pETuA9ejRg6+++orx48czadIkbDYbDRo0YOjQoQCUL1+eF154gVGjRmG322nSpAlffvklMTExAIwfP557772XOnXqkJ6ejplHubCXXnqJl156yePYjBkzGDx4MK+99hr/93//x4MPPkjt2rWZMWMGnTp1yrEPjRo14osvviAmJoby5cszb948xo4dS1paGvXq1WPOnDlceumlRfOE5aFULQBbuXIl3bt39zjWo0cPpk2bRmZmJjabLdtt0tPTSU9Pd19OTk4GIDMzs8B5J67b+XJ7V5rBiVPpfs93kYIryFhKyaSxLDs0lmWHv8YyMzMT0zRxOBw4HI5C9sqAmld6Hir0OfM3cOBABg4c6NH/bt260a1bt2xtHQ4Hffv2pW/f7LPFrtu3bt2aDRs2ZDt+vr/++ivXPjkcDu69917uvffeHO/j3D6YpklKSgpRUVE4HA7atWuXY0qBL+PjcDgwTZPMzEysVs8PFL68ZkpVMHvw4EGqVq3qcaxq1apkZWVx5MgR4uList1m4sSJjBs3LtvxxYsXF3rFXUJCgtdt009aAAs//7KW0zu9K7YsxceXsZSSTWNZdmgsy47CjmVQUBCxsbGcPHmSjIwMP/VKCiIlJcVv58rIyOD06dMsW7bMY7MIgFOnTnl9nlIVzELOq+5yOu7y5JNPMmrUKPfl5ORk4uPj6d69O9HR0QXqQ2ZmJgkJCXTr1i3H2eCcLPhnA38kH6Zuwyb0vrx6ge5X/K8gYyklk8ay7NBYlh3+Gsu0tDT27dtHZGQkoaGhfuyheOvcmVl/1ctPS0sjLCyMDh06ZBtX1zfp3ihVwWxsbCwHDx70OJaUlERQUJA7h+R8ISEhhISEZDtus9kK/Y+kL+eICnW2S8sy9Y9zCeSP14OUDBrLskNjWXYUdiztdjuGYWCxWNwr/aV4udIHXOPgDxaLBcMwcnx9+PJ6KVWviLZt22b7qmLx4sW0atWqxP+D59oFLEXVDERERET8JqDB7MmTJ92FgsFZemvjxo3s3bsXcKYIDBw40N1+2LBh7Nmzh1GjRrF9+3amT5/OtGnTePTRRwPRfZ+4dgHTpgkiIiIFk9eKfSl9/DWeAQ1m165dS/Pmzd17+Y4aNYrmzZvz7LPPApCYmOgObAFq167NwoULWbJkCZdddhnPPfccr7/+eokvywXO0lyASnOJiIj4yPXtqy+LgqTkcy3mO7+Sga8CmjPbqVOnPKPymTNnZjvWsWNH1q9fX4S9KhqumdmTGQpmRUREfGG1WilfvjxJSUkAhIeH+20RknjH4XCQkZFBWlqaX3JmHQ4Hhw8fJjw8nKCgwoWjpWoBWGmmNAMREZGCi42NBXAHtFK8TNPk9OnThIWF+e2DhMVioUaNGoU+n4LZYqI0AxERkYIzDIO4uDiqVKmiDTUCIDMzk2XLltGhQwe/LboPDg72yyyvgtli4k4z0MysiIhIgVmt1kLnWIrvrFYrWVlZhIaGlrgKUqWqNFdp5irNpWBWRERExH8UzBaTSOXMioiIiPidgtliEqk0AxERERG/UzBbTFw5s5l2k/Qse4B7IyIiIlI2KJgtJhHBZ5PVU9MVzIqIiIj4g4LZYhJktRBmcwa0Ks8lIiIi4h8KZouRynOJiI
iI+JeC2WIUpfJcIiIiIn6lYLYYRYQ40wxUnktERETEPxTMFiNXea4UBbMiIiIifqFgthhp4wQRERER/1IwW4wiFMyKiIiI+JWC2WLkTjNQaS4RERERv1AwW4yUZiAiIiLiXwpmi1Gk6syKiIiI+JWC2WKkTRNERERE/EvBbDGKDFWagYiIiIg/KZgtRkozEBEREfEvBbPF6GyagT3APREREREpGxTMFqOzM7OZAe6JiIiISNmgYLYYnS3NpZlZEREREX9QMFuMXAvATmrTBBERERG/UDBbjCKDncFsht1BRpYjwL0RERERKf0UzBajiBCr+2+V5xIREREpPAWzxSjIaiHU5nzKVZ5LREREpPAUzBYz1ZoVERER8R8Fs8VMwayIiIiI/yiYLWYRCmZFRERE/EbBbDFzz8yqPJeIiIhIoSmYLWZnN05QMCsiIiJSWApmi5l74wQFsyIiIiKFpmC2mClnVkRERMR/FMwWM6UZiIiIiPiPgtliptJcIiIiIv6jYLaYnU0zsAe4JyIiIiKln4LZYhblLs2VGeCeiIiIiJR+CmaLWYQ7Z1YzsyIiIiKFpWC2mKk0l4iIiIj/KJgtZpEhVkDBrIiIiIg/BDyYnTJlCrVr1yY0NJSWLVuyfPnyPNt/8MEHNGvWjPDwcOLi4rjzzjs5evRoMfW28CJUmktERETEbwIazM6dO5eRI0fy9NNPs2HDBtq3b0+vXr3Yu3dvju1/+uknBg4cyJAhQ9i6dSuffPIJa9asYejQocXc84JzleZKUTArIiIiUmgBDWZfeeUVhgwZwtChQ2nYsCGTJ08mPj6eqVOn5th+1apV1KpViwcffJDatWtz1VVXce+997J27dpi7nnBuYLZjCwHmXZHgHsjIiIiUroFBeqOMzIyWLduHaNHj/Y43r17d1asWJHjbdq1a8fTTz/NwoUL6dWrF0lJSXz66adcc801ud5Peno66enp7svJyckAZGZmkplZsPJYrtsV5PbBFtP99/GTaZQPtxWoD+IfhRlLKVk0lmWHxrLs0FiWHcU9lr7cT8CC2SNHjmC326latarH8apVq3Lw4MEcb9OuXTs++OADbr31VtLS0sjKyqJv377897//zfV+Jk6cyLhx47IdX7x4MeHh4YV6DAkJCQW6nc2wkmkafLkogZjQQnVB/KSgYyklj8ay7NBYlh0ay7KjuMby1KlTXrcNWDDrYhiGx2XTNLMdc9m2bRsPPvggzz77LD169CAxMZHHHnuMYcOGMW3atBxv8+STTzJq1Cj35eTkZOLj4+nevTvR0dEF6nNmZiYJCQl069YNm833mdXxm5dwNDWD1u3aUz82qkB9EP8o7FhKyaGxLDs0lmWHxrLsKO6xdH2T7o2ABbOVKlXCarVmm4VNSkrKNlvrMnHiRK688koee+wxAJo2bUpERATt27dnwoQJxMXFZbtNSEgIISEh2Y7bbLZCD0ZBzxEZGsTR1AzS7OjNXUL44/UgJYPGsuzQWJYdGsuyo7jG0pf7CNgCsODgYFq2bJltujohIYF27drleJtTp05hsXh22Wp11m01TTOnm5RIEcHaOEFERETEH/wSzB4/frxAtxs1ahTvvvsu06dPZ/v27Tz88MPs3buXYcOGAc4UgYEDB7rb9+nTh3nz5jF16lT++usvfv75Zx588EFat25NtWrV/PFQioV2ARMRERHxD5/TDF588UVq1arFrbfeCkC/fv347LPPiI2NZeHChTRr1szrc916660cPXqU8ePHk5iYSOPGjVm4cCE1a9YEIDEx0aPm7ODBg0lJSeGNN97gkUceoXz58lx99dW8+OKLvj6MgIrUxgkiIiIifuFzMPvWW28xe/ZswJkSkJCQwDfffMPHH3/MY489xuLFi30634gRIxgxYkSO182cOTPbsQceeIAHHnjA126XKK5g9mS6PcA9ERERESndfA5mExMTiY+PB+Crr76iX79+dO/enVq1atGmTRu/d7Ascm1pez
JNM7MiIiIiheFzzmyFChXYt28fAIsWLaJr166AcwGW3a6ZRm9EhjgXraVmKJgVERERKQyfZ2ZvvPFGbr/9durVq8fRo0fp1asXABs3bqRu3bp+72BZFBniLDeRoplZERERkULxOZh99dVXqVWrFvv27WPSpElERkYCzvSD3HJfxVOEa2ZWC8BERERECsXnYNZms/Hoo49mOz5y5Eh/9OeCEKXSXCIiIiJ+4XPO7HvvvcfXX3/tvvz4449Tvnx52rVrx549e/zaubLKvQBMwayIiIhIofgczD7//POEhYUBsHLlSt544w0mTZpEpUqVePjhh/3ewbJIdWZFRERE/MPnNIN9+/a5F3rNnz+fm2++mXvuuYcrr7ySTp06+bt/ZVKkZmZFRERE/MLnmdnIyEiOHj0KwOLFi92luUJDQzl9+rR/e1dGRWhmVkRERMQvfJ6Z7datG0OHDqV58+b8/vvvXHPNNQBs3bqVWrVq+bt/ZZJrZlaluUREREQKx+eZ2TfffJO2bdty+PBhPvvsM2JiYgBYt24dt912m987WBa5gtn0LAeZdkeAeyMiIiJSevk8M1u+fHneeOONbMfHjRvnlw5dCFxpBuBMNSgfHhzA3oiIiIiUXj4HswDHjx9n2rRpbN++HcMwaNiwIUOGDKFcuXL+7l+ZFBxkITjIQkaWg5MKZkVEREQKzOc0g7Vr11KnTh1effVVjh07xpEjR3j11VepU6cO69evL4o+lklR7kVg9gD3RERERKT08nlm9uGHH6Zv37688847BAU5b56VlcXQoUMZOXIky5Yt83sny6KIkCCOpmZwMj0z0F0RERERKbV8DmbXrl3rEcgCBAUF8fjjj9OqVSu/dq4sO7sLmGZmRURERArK5zSD6Oho9u7dm+34vn37iIqK8kunLgSuNIOTKs8lIiIiUmA+B7O33norQ4YMYe7cuezbt4+///6bjz76iKFDh6o0lw8iQqyANk4QERERKQyf0wxeeuklDMNg4MCBZGU5AzGbzcbw4cN54YUX/N7Bsioy1AZAioJZERERkQLzOZgNDg7mtddeY+LEiezcuRPTNKlbty42m43ExERq1KhRFP0scyI1MysiIiJSaAWqMwsQHh5OkyZN3Jc3bdpEixYtsNu1oMkbke7SXApmRURERArK55xZ8Q9XNQOlGYiIiIgUnILZANHMrIiIiEjhKZgNkEiV5hIREREpNK9zZjdv3pzn9Tt27Ch0Zy4kZzdNUDArIiIiUlBeB7OXXXYZhmFgmma261zHDcPwa+fKssjQM2kGGQpmRURERArK62B2165dRdmPC47SDEREREQKz+tgtmbNmkXZjwuOO5hNVykzERERkYLSArAAORvMZga4JyIiIiKll4LZAHEtAEvLdJBldwS4NyIiIiKlk4LZAIk4s50tQKpSDUREREQKRMFsgIQEWQm2Op/+k6poICIiIlIgCmYDyF2eS7VmRURERArEb8Hs9u3bufjii/11uguCK9UgReW5RERERArEb8FsRkYGe/bs8dfpLgiRITZAM7MiIiIiBeV1ndlRo0blef3hw4cL3ZkLTeSZmVltaSsiIiJSMF4Hs6+99hqXXXYZ0dHROV5/8uRJv3XqQhHhrjWrYFZERESkILwOZuvVq8fDDz/MgAEDcrx+48aNtGzZ0m8duxBoS1sRERGRwvE6Z7Zly5asW7cu1+sNw8A0Tb906kLhCmaVMysiIiJSMF7PzL788sukp6fnen2zZs1wOLSTlS/cM7OqMysiIiJSIF4Hs7GxsXlen5WVxYEDB6hRo0ahO3WhiFCagYiIiEih+K0019atW6ldu7a/TndBiNKmCSIiIiKFEvAdwKZMmULt2rUJDQ2lZcuWLF++PM/26enpPP3009SsWZOQkBDq1KnD9OnTi6m3/qVqBiIiIiKF43WaQVGYO3cuI0eOZMqUKVx55ZW89dZb9OrVi23btuWartCvXz8OHTrEtGnTqFu3LklJSWRllc
5gUMGsiIiISOEENJh95ZVXGDJkCEOHDgVg8uTJfPvtt0ydOpWJEydma79o0SKWLl3KX3/9RcWKFQGoVatWcXbZr6IUzIqIiIgUitfB7ObNm/O8fseOHT7dcUZGBuvWrWP06NEex7t3786KFStyvM2CBQto1aoVkyZN4v333yciIoK+ffvy3HPPERYWluNt0tPTPaowJCcnA5CZmUlmZqZPfXZx3a6gt3c5swEYJ9OyCn0uKRh/jaUEnsay7NBYlh0ay7KjuMfSl/vxOpi97LLLcq0l6zpuGIbXd3zkyBHsdjtVq1b1OF61alUOHjyY423++usvfvrpJ0JDQ/n88885cuQII0aM4NixY7nmzU6cOJFx48ZlO7548WLCw8O97m9OEhISCnX7/akAQRxLTmXhwoWFOpcUTmHHUkoOjWXZobEsOzSWZUdxjeWpU6e8but1MLtr164CdSY/5wfAeQXFDocDwzD44IMPKFeuHOBMVbj55pt58803c5ydffLJJxk1apT7cnJyMvHx8XTv3j3XrXnzk5mZSUJCAt26dcNmsxXoHAB7j51i0uafyMRK7949CnweKTh/jaUEnsay7NBYlh0ay7KjuMfS9U26N7wOZmvWrFmgzuSmUqVKWK3WbLOwSUlJ2WZrXeLi4rjooovcgSxAw4YNMU2Tv//+m3r16mW7TUhICCEhIdmO22y2Qg9GYc9RIdIZfJ/OdGCxBmG1eD+zLf7lj9eDlAway7JDY1l2aCzLjuIaS1/uo0ALwI4fP87q1atJSkrKtuvXwIEDvTpHcHAwLVu2JCEhgRtuuMF9PCEhgeuuuy7H21x55ZV88sknnDx5ksjISAB+//13LBYL1atXL8hDCagIV9IszkVg5cL0RhcRERHxhc/B7Jdffkn//v1JTU0lKirKIyXAMAyvg1mAUaNGcccdd9CqVSvatm3L22+/zd69exk2bBjgTBHYv38/s2bNAuD222/nueee484772TcuHEcOXKExx57jLvuuivXBWAlWUiQFZvVINNukqpgVkRERMRnPgezjzzyCHfddRfPP/98oRdQ3XrrrRw9epTx48eTmJhI48aNWbhwoTulITExkb1797rbR0ZGkpCQwAMPPECrVq2IiYmhX79+TJgwoVD9CKTIkCD+OZWpXcBERERECsDnYHb//v08+OCDhQ5kXUaMGMGIESNyvG7mzJnZjjVo0KBMrYqMOBPMpiiYFREREfGZz9vZ9ujRg7Vr1xZFXy5IkWc2TtDMrIiIiIjvvJqZXbBggfvva665hscee4xt27bRpEmTbKvN+vbt698elnGuYPZkmoJZEREREV95Fcxef/312Y6NHz8+2zHDMLDb7YXu1IUkMlRb2oqIiIgUlFfB7Pnlt8R/IkIUzIqIiIgUlM85s7NmzSI9PT3b8YyMDHcJLfFeZLByZkVEREQKyudg9s477+TEiRPZjqekpHDnnXf6pVMXkrNpBkrPEBEREfGVz8GsaZoeGyW4/P333x7bzIp3zqYZZAa4JyIiIiKlj9d1Zps3b45hGBiGQZcuXQgKOntTu93Orl276NmzZ5F0siyLcpfm0sysiIiIiK+8DmZdFQ02btxIjx49iIyMdF8XHBxMrVq1uOmmm/zewbLONTObotJcIiIiIj7zOpgdM2YMALVq1eLWW28lNDS0yDp1IYkIsQJaACYiIiJSED5vZzto0CAA1q1bx/bt2zEMg0aNGtG8eXO/d+5CEKU6syIiIiIF5nMwm5SUxL/+9S+WLFlC+fLlMU2TEydO0LlzZz766CMqV65cFP0ssyJUmktERESkwHyuZvDAAw+QnJzM1q1bOXbsGP/88w+//vorycnJPPjgg0XRxzJNO4CJiIiIFJzPM7OLFi3iu+++o2HDhu5jjRo14s0336R79+5+7dyFIFI7gImIiIgUmM8zsw6HA5vNlu24zWbTtrcF4ApmT2XYsTvMAPdGREREpHTxOZi9+uqreeihhzhw4ID72P
79+3n44Yfp0qWLXzt3IXCV5gJIzdDsrIiIiIgvfA5m33jjDVJSUqhVqxZ16tShbt261K5dm5SUFP773/8WRR/LtJAgC0EW545qWgQmIiIi4hufc2bj4+NZv349CQkJ/Pbbb5imSaNGjejatWtR9K/MMwyDyNAgjp/K5GRaFmhHYBERERGv+RzMunTr1o1u3br5sy8XrIjgM8GsZmZFREREfOJzmgHA0qVL6dOnD3Xr1qVevXr07duX5cuX+7tvFwzXxgmp6fYA90RERESkdPE5mJ09ezZdu3YlPDycBx98kPvvv5+wsDC6dOnChx9+WBR9LPMi3OW5MgPcExEREZHSxec0g//85z9MmjSJhx9+2H3soYce4pVXXuG5557j9ttv92sHLwRna81qZlZERETEFz7PzP7111/06dMn2/G+ffuya9cuv3TqQuMOZtM0MysiIiLiC5+D2fj4eL7//vtsx7///nvi4+P90qkLTUSIFYDUDM3MioiIiPjC5zSDRx55hAcffJCNGzfSrl07DMPgp59+YubMmbz22mtF0ccyLzLEuaNaSpqqGYiIiIj4wudgdvjw4cTGxvLyyy/z8ccfA9CwYUPmzp3Ldddd5/cOXggiXTOzKs0lIiIi4pMC1Zm94YYbuOGGG/zdlwtWpLs0l4JZEREREV8UeNMEgJMnT+JwODyORUdHF6pDFyJXaa4UBbMiIiIiPvF5AdiuXbu45ppriIiIoFy5clSoUIEKFSpQvnx5KlSoUBR9LPNc1Qw0MysiIiLiG59nZvv37w/A9OnTqVq1KoZh+L1TF5qzdWYVzIqIiIj4wudgdvPmzaxbt4769esXRX8uSBEKZkVEREQKxOc0g8svv5x9+/YVRV8uWEozEBERESkYn2dm3333XYYNG8b+/ftp3LgxNpvN4/qmTZv6rXMXirM7gCmYFREREfGFz8Hs4cOH2blzJ3feeaf7mGEYmKaJYRjY7drFylfu0lwZdhwOE4tFecgiIiIi3vA5mL3rrrto3rw5c+bM0QIwP3HNzAKkZmQRFWrLo7WIiIiIuPgczO7Zs4cFCxZQt27doujPBSkkyEKQxSDLYZKablcwKyIiIuIlnxeAXX311WzatKko+lI2Oeywazls+dT525E9DcMwjHMqGmQWdw9FRERESi2fZ2b79OnDww8/zJYtW2jSpEm2BWB9+/b1W+dKvW0LYNETkHzg7LHoatDzRWjk+TxFhgRx4nQmJ9OVcywiIiLiLZ+D2WHDhgEwfvz4bNdpAdg5ti2AjwcCpufx5ETn8X6zPAJalecSERER8Z3PaQYOhyPXHwWyZzjszhnZ8wNZOHts0WiPlIOIECsAKSrPJSIiIuI1n4NZ8cKeFZ6pBdmYkLzf2e6MyDOLvjQzKyIiIuI9r4PZX375hW+++cbj2KxZs6hduzZVqlThnnvuIT093e8dLJVOHvK5XeSZmVltaSsiIiLiPa+D2bFjx7J582b35S1btjBkyBC6du3K6NGj+fLLL5k4caLPHZgyZQq1a9cmNDSUli1bsnz5cq9u9/PPPxMUFMRll13m830WuciqPrdz7wKmYFZERETEa14Hsxs3bqRLly7uyx999BFt2rThnXfeYdSoUbz++ut8/PHHPt353LlzGTlyJE8//TQbNmygffv29OrVi7179+Z5uxMnTjBw4ECP/pQoNds5qxaQ24YSBkRf5Gx3RoSCWRERERGfeV3N4J9//qFq1bMziUuXLqVnz57uy5dffjn79u3z6c5feeUVhgwZwtChQwGYPHky3377LVOnTs1zlvfee+/l9ttvx2q1Mn/+/DzvIz093SP9ITk5GYDMzEwyMwtW09V1u7xub3R7HutndwIGxjkLwcwzAa69238w7Q6wOwAIC3J+rkg5nVHgfonvvBlLKR00lmWHxrLs0FiWHcU9lr7cj9fBbNWqVdm1axfx8fFkZGSwfv16xo0b574+JSUlW83ZvGRkZLBu3TpGjx7tcbx79+
6sWLEil1vBjBkz2LlzJ7Nnz2bChAn53s/EiRM9+umyePFiwsPDve5vThISEvK41kJc7ftp8vcHhGUecx89bavAr9X7k/iXBf5a6D7+934DsLLjrz0sXLirUP0S3+U9llKaaCzLDo1l2aGxLDuKayxPnTrldVuvg9mePXsyevRoXnzxRebPn094eDjt27d3X79582bq1Knj9R0fOXIEu93uMdsLzqD54MGDOd7mjz/+YPTo0SxfvpygIO+6/uSTTzJq1Cj35eTkZOLj4+nevTvR0dFe9/dcmZmZJCQk0K1bt3wC+N7geIasLR8T9NUDmJZgbA9vprktlObntTy+eh8L9m6nfKVYeve+rED9Et95P5ZS0mksyw6NZdmhsSw7inssXd+ke8PrYHbChAnceOONdOzYkcjISN577z2Cg4Pd10+fPp3u3bv71lOcGy2cyzTNbMcA7HY7t99+O+PGjeOSSy7x+vwhISGEhIRkO26z2Qo9GN6dwwYtBsB3z2CkncD2z+9wUctsrcpHOPt4KtOuN3wA+OP1ICWDxrLs0FiWHRrLsqO4xtKX+/A6mK1cuTLLly/nxIkTREZGYrVaPa7/5JNPiIyM9PqOK1WqhNVqzTYLm5SUlG22FpxpDGvXrmXDhg3cf//9gHMDB9M0CQoKYvHixVx99dVe33+xslggvg38sRj2rc4xmI0IPrMATJsmiIiIiHjN500TypUrly2QBahYsaLHTG1+goODadmyZbbci4SEBNq1a5etfXR0NFu2bGHjxo3un2HDhlG/fn02btxImzZtfH0oxSv+TP/2rsrx6shQVTMQERER8ZXXM7NFYdSoUdxxxx20atWKtm3b8vbbb7N3716GDRsGOPNd9+/fz6xZs7BYLDRu3Njj9lWqVCE0NDTb8RKpxhXO3/t+AdOE81IpVGdWRERExHcBDWZvvfVWjh49yvjx40lMTKRx48YsXLiQmjVrApCYmJhvzdlSo1oLsARBSiIc3wsVanpc7aozm5puD0TvREREREoln9MM/G3EiBHs3r2b9PR01q1bR4cOHdzXzZw5kyVLluR627Fjx7Jx48ai76Q/BIdDXDPn3/t+yXa1a2Y2NSMLh8PMdr2IiIiIZBfwYPaCkkferCuYNU1nRQMRERERyZ+C2eLkCmb3rc52VajNgtXizKNNVd6siIiIiFcUzBYn1yKwpK2Q5lkM2DAMIoKdVSJSVJ5LRERExCsKZotTVCyUrwmmA/5ek/3qUGeBYM3MioiIiHhHwWxxO7dE13kiQpwzswpmRURERLyjYLa45bEIzFWeK0XBbMnksMOu5bDlU+dvhxbqiYiIBFpA68xekFzB7N9rwZ4F1rND4C7PpWC25Nm2ABY9AckHzh6LrgY9X4RGfQPXLxERkQucZmaLW5WGEBINmanOhWDn0C5gJdS2BfDxQM9AFiA50Xl824LA9EtEREQUzBY7ixWqX+78e69n3qyC2RLIYXfOyJLTRhZnji0arZQDERGRAFEwGwjuRWCeebOunNmTKs1VcuxZkX1G1oMJyfud7URERKTYKZgNBPciMM+Z2ahQ5cyWOCcP+bediIiI+JWC2UC4qCUYVkj+G0787T7snplN11fWJUZkVf+2ExEREb9SMBsIIZEQ29j59zklus4Gs5mB6JXkpGY7Z9UCjFwaGBB9kbOdiIiIFDsFs4ES78qbXe0+FOUuzaWZ2RLDYnWW38rRmQC35wvOdiIiIlLsFMwGSo0zebP7ss/MatOEEqZRX2j/aPbj0dWg3yzVmRUREQkgbZoQKK6Z2YO/QvpJCInUpgklWWiU87dhAdMB1ZrD0O81IysiIhJgmpkNlHIXQbl4MO2wfy0AYTZnYHQ4JZ2VO49id+RU21QCImm783fdbs7fJ/YrkBURESkBFMwG0jkluhb9msjds9YAcOJ0Jre9s4qrXvyBRb8mBrCD4nbozG5tTW4GDEhNgpNJAe2SiIiIKJgNrDPB7JHtSxk+ez2HT2
Z4XH3wRBrDZ69XQBto9iw4vMP590UtIaaO8++DWwLXJxEREQEUzAbWmUVgoYc2YODIdrUryWDcl9uUchBIx/4CezrYwqFCbah6pqyaa7ZWREREAkbBbCBVuRR7UASRnOIS4+8cm5hA4ok0Vu86Vrx9k7OSzgStlRuAxXK2RvChXwPXJxEREQEUzAaWNYijFZoC0MqyI8+mSSlpxdEjyYlr8VfVRmd+nwlmDyqYFRERCTQFswGWHnc5AC0tv+fZrkpUaHF0R3LiSieocqnztyuYPbIDstID0ycREREBFMwGXLUmnQFoZeQczBpAXLlQWteuWIy9Eg9J25y/qzR0/i5XHULLgeOchWEiIiISEApmA8wa3woTC/GWw1TlnxzbjOnTCKvFKOaeCQAZqXBsl/PvqmdmZg0DqjZx/q28WRERkYBSMBtoodEYZ4KkLpG7sl39wk1N6dk4rrh7JS6HfwNMCK8EkVXOHo9V3qyIiEhJoGC2JDhTomtCy1Tm3H0Fr/3rMupUjgDgsBZ+BdahMykGrsVfLu7yXKo1KyIiEkgKZkuC+CsAsOz7hbZ1Yrjusou4r3NdAD74ZS9Z9uw1aKWYuCoZuBZ/uZw7M2uqBrCIiEigKJgtCc7MzHJwM2ScAqB3kzgqRgSTeCKN77Zr29SAcdWYdS3+cqncAAwLnD4GKQeLv18iIiICKJgtGcrFQ1Q15+r4/esACLVZufXyeADeX7U7gJ27wLnTDM6bmbWFQUy9M22UNysiIhIoCmZLAsOA+NbOv/etch/u36YGFgN+/vMofyadDFDnLmCpRyD1zKx45QbZr3enGihvVkREJFAUzJYUNZx5s+xb7T5UvUI4VzeoCsDsVXsC0asLm2uzhAq1ICQy+/VVta2tiIhIoCmYLSniz+TN7vsFHGcXfA1sWxOAz9b9TWp6ViB6duFyb5Zwac7Xx56pNavyXCIiIgGjYLakiG0CQWGQdgJW/hd2LQeHnavqVqJ2pQhS0rOYv3F/oHt5YUnKpSyXi2tm9ugfkHm6ePokIiIiHhTMlhQ7vgHT7vw74Vl471qY3BjLb1/Sv00NAN5fuQdTZaCKz6HztrE9X1QshMeA6TizuYKIiIgUNwWzJcG2BfDxQLBneB5PToSPB3Jb1CZCbRZ+O5jCmt05b3krfuZw5F5j1sUwzlY5UKqBiIhIQCiYDTSHHRY9AeQ04+o8FvHD09zQLBaAWSt3F/x+di2HLZ+6UxgkD8f3QGYqWIMhpk7u7aqeyZvVIjAREZGACAp0By54e1ZA8oE8GpiQvJ97ah1izlpY9OtBkpLTqBId6v19bFvgDJjPvZ/oatDzRWjUt8BdL9Nc+bKV6oPVlnu7c3cCExERkWKnmdlAO3nIq2a1Q07SsmYFshwmc1bv8/78rhSG8wPmMykMbFvgQ2cvIPkt/nJxl+faom1tRUREAkDBbKBFVvW6natM14er95Bpd+RzA7xKYWDRaKUc5MS9+CufYLZyfbAEOatQnPi76PslIiIiHhTMBlrNds6v/DFybxN9EdRsR8/GsVSKDOZQcjrfbfNiRtfLFAb2rPC112VfkpfBbFCIMxUBlDcrIiISAAEPZqdMmULt2rUJDQ2lZcuWLF++PNe28+bNo1u3blSuXJno6Gjatm3Lt99+W4y9LQIWqzN3Fcg1oO30JFishARZ+dflzjJds1Z6sSOYlykMXre7UGSlw5E/nH/nl2YAypsVEREJoIAGs3PnzmXkyJE8/fTTbNiwgfbt29OrVy/27t2bY/tly5bRrVs3Fi5cyLp16+jcuTN9+vRhw4YNxdxzP2vUF/rNgug4z+OWMwuPtn3hzse8rU0NLAas/OsofxxKyfu8PqQwyDmO/O6s+RtSzjkrnh9XeS7NzIqIiBS7gAazr7zyCkOGDGHo0KE0bNiQyZMnEx8fz9SpU3NsP3nyZB5//HEuv/xy6tWrx/PPP0+9evX48ssvi7nnRaBRXxj5Kwz6Cm6a5vx9zxIICo
U/E2D12wBcVD6Mrg2dwef7q/KZna3WwllaKleGO4VBznHonMVfRh7pHy7uRWAKZkVERIpbwEpzZWRksG7dOkaPHu1xvHv37qxY4V0Op8PhICUlhYoVK+baJj09nfT0dPfl5ORkADIzM8nMzCxAz3HfrqC3z1P1KzwuWq4ei3XxaMzF/yareluo0pDbW1dn8bZDfLpuH+3rVCQlPYsqUSG0qlkBq+VM8GU6sH4xAos9w73869ywzHXM3u0/mHYHeLOgrAzKaSwtB3/FCtgrNcDhzRjHNMAGmEd3kpV6HIIjiqSvkrcifV9KsdJYlh0ay7KjuMfSl/sJWDB75MgR7HY7Vat6fsVdtWpVDh486NU5Xn75ZVJTU+nXr1+ubSZOnMi4ceOyHV+8eDHh4eG+dfo8CQkJhbq9V8w42kQ3IzZ5E6fev51l9cdgN4KJtllJznBw9+yzKRblg01urOWgWYxJgwOfUv/QAhxY2RHbl1pHlxKWeczj1JviB7HnLwv8tbDoH0cJd+5Yttm5lFjg1yQHuxd699z0CCpHaNYJVn4xnX8i8thkQYpcsbwvpVhoLMsOjWXZUVxjeerUKa/bBnzTBOO8r3FN08x2LCdz5sxh7NixfPHFF1SpUiXXdk8++SSjRo1yX05OTiY+Pp7u3bsTHR1doD5nZmaSkJBAt27dsNnyKKjvLycvx3y3I+VS99E7ZA3fVHuQ5FWbsjU7kWEw43crn7f9i/qHnPVjHddOpm6z28BhJ2vfSjh5CMvK/2I59CtNalXh0g69i77/JVhOYxn03ycBuLTzzTSq0dar81hPzIC/fqRdnWjMFhf2cxooxf6+lCKjsSw7NJZlR3GPpeubdG8ELJitVKkSVqs12yxsUlJSttna882dO5chQ4bwySef0LVr1zzbhoSEEBISku24zWYr9GD44xxeqXARXDcFPrwF6+q3+M5WBWiYrZkJtLNspfGGF5wHOjxGUKuBrt5C3c7OPy0WmHc31s0fYe38pPNySeWwO0uHnTzkXKhWs52zAoSfucfy9HFnuTIgqFoT8HZ8Y5vAXz8SdGS797eRIlFs70spchrLskNjWXYU11j6ch8Bi2KCg4Np2bJltunqhIQE2rXLfUHSnDlzGDx4MB9++CHXXHNNUXez5LikO7S+B4CnMv5LRbJ/Yqlj7Geq7VWCsHOkVh/o/HTO52rYx7lS/8Re2LWkCDtdSNsWwOTG8N618NkQ5+/JjYt217Kk7c7f0RdBWAXvb1dV5blEREQCIaBTcqNGjeLdd99l+vTpbN++nYcffpi9e/cybNgwwJkiMHDgQHf7OXPmMHDgQF5++WWuuOIKDh48yMGDBzlx4kSgHkLx6jae5Kg6VDGO86LtHSzYucKyjb6WFXS3rGGG7UXKGadY47iElU3G574S3xYGTW52/r1hdvH13xeB2obX280SzueqNXtoa8nb1tZhh13LYcunzt/a8U1ERMqQgObM3nrrrRw9epTx48eTmJhI48aNWbhwITVrOrdtTUxM9Kg5+9Zbb5GVlcV9993Hfffd5z4+aNAgZs6cWdzdL362MHZ1eJ0GX11HN+s6NljupZzhmSCd5CjHPRmjmFK+XN7nanEHrJ0G27+CU8cgPPeKEMUu3214Dec2vA2u8X/KQdI5Zbl8UekSZxm0jBQ4vgcq1PJvvwpq2wLnc3nuh4Loas6NOhr1DVy/RERE/CTgyZIjRoxg9+7dpKens27dOjp06OC+bubMmSxZssR9ecmSJZimme3ngghkz2jc8koSrO0BiMYzkDVNqGyc4MqgHdSulE95qLjLoGoTsKfDlk+KqLcFFMhteA8VcGbWaoPKZ7a1LSmpBoGa3Q4Ehx1jz09cdGwlxp6fNPssInIBCXgwK76x4qBLyDZMM3sWgWE45y2fss7ipjeXsWnf8dxPZBjO2VmA9e+XrK/GA7UNr2lC0lbn374Gs+D8cAAlY/OEfGe3cc5uFzboKwkpDGdyq4NmX0+rPV
MJmn190edWi4hIiaFgtrTZs4Kw04dyTYe1GFDNOEr8yU3c8tZKPlm7DwC7w2TlzqN8sXE/K3cexe4wocktzq/GD22BxOylvgImUNvwJh+AtBNgWM/OsvrClTd7cIt/+1UQxTG7HYgFejn14UKZfRYRkRwFvM6s+MjL2cirL3Kwap+Dxz7dzMItiWxPTOFgcpr7+rhyoYzp04ieDa6FrfNgw/tQ7bIi6rSParZz5nXmFYyFx/h/G15XvmxMXQjKXs4tXyVpW9uint12BZHnz/y6gsh+s4o+JzeQudUiIlJiaGa2tPFyNnJor7Y81KUeAD/uOOwRyAIcPJHG8NnrWVPxTHmzzZ9A5mm/drXALFbnAqW8nDoGa9717/0WdPGXiyuY/Wc3pHlf7LlIFOXsdnGlMOQnkLnVIiJSYiiYLW1cs5bktkuaAdEXYal1JQ92qUeF8JyLDrvCkId+KYdZLh7ST8D2L93X55iWUJyiYnM+Hn0R1O4ImPDN4/DNE/4LmtyLvy4t2O0jYiAqzvm3KzAOlJrtILR8Hg2cr5MCzW6XlCAyULnVIiJSoijNoLRxzVp+PBBnQHtukHkmwO35AlisrN55lH9OZeZ6KhM4kJzB3w1uJH7za85Ug6b9WPRrIuO+3EbiiRzSEhrHFfoh2B0mq3cdIykljSpRobSuXRGr5bzg/Mf/OH9fdjs0u91zBzDDAj9Phu/Gwi//g3/2wE3vQkikTzuG2R0mv+w6xrojBjG7jnFl0lbnM1gl++5qXqvaGFISnakGNa4o+HkKK+UgZKXn0cCEruMK9vV7SQkiI3LfxtqDv3OrRUSkRFEwWxo16uvMScyxfugL7lzFpJS0XE7gaey+y3gXA2PXMpauWs3w+YezfYHsSkuYOqBFtoDWq+D0DK8C5d0/wV9LwGLD3uEJVv8TRZL9Yqo4QmmNBathwFUPQ4Xa8Pm98Ps3MKMXXD4Ulr7gVU1Vz35Y+eCPX9gW+hshkGuagVePM7Yx/JmA4+Cv/LLzqFfPid+ZJnz5IGSdhop1nL/PfU4MC5gO2LHQuXlGbqsJc5PnjO85ijKIPHnY+YEmPwWdfRYRkVJDwWxp1aivc2FLHrOQVaJCvTrV94khLLc1poN1C1u+noLJLdnanFlOw7gvt9GtUaw7MPNlFnfRr4kMn70+70D50lj4wTkru7fWzdz61s7cz33p9c5gZc6/4OBmZwB3vhwWJOXUj1rGQULIJNUMYfn+EHqet4eE14/zTN7s1vU/c9vPq/J9Tlx8+UCQrw2z4c/vwBoCt30EMXU8XycA71/vXPhX7TK48iHvz515Gn5+Lf92RRlE7loGn90NJw+CxQaOTLJ/S3HGlSO1+EtEpIxTzmxpZrFC7fbO2bXa7bP9p926dkXiyoXmlV1LpchgHuxSl1/K9wbgRstSLDhybG8CiSfSWPTrQRwO0x0UnhvgwdngdNGvie5jdofJuC+35bVkiHFfbsP+54+wdwV2SzD9tl2Z/7njL4chi8GS2+cyzwVJufWjgeEsYfaHWZ1xX/3mkSPsy+NcnuLM9b3YsQfjnOcxp7bnnv+qF3/gtndW8dBHG7ntnVVc9eIPObaFfPKZT+yHb59y/n3101D5EuxYWOloxBf2tqx0NMJe8yrnDD44UzX+/N67c2emwUf9YfcyCHJ9UMrl1dX2/sIHkefXsM1Mhx+fh/f6OgPZSvXh3qXQ732IPu9DgjXY+Xv9e5BxKvu5S6uSUNdXRKSE0cxsGWa1GIzp04jhs9fnll3LhOsbO2cLOz9C+ov/o1rmMdpbtrDU0SzX89734XpsVgOHmed6dh6eu4kFGw9wMsNO4vFT2YLB82+TeOI0h+Y/TzVgrtmNg2TfYjfHGeLkA+DIyuOZOLsgabWjUY79uMTiDGZ/c8STeCKNW6auoGq5UIIsBt9tT8rzcY7+bAsOB4TaLDzx3UmWmzYijHRqGofYbcbl3m+8nK0+ZzY3zxniS2Ods9PpyVD9cmh7f+
7tr72Gns03OmdxP70L7vmRRQfCcj93g4rOGe6d34MtHAZ8BqlHsqe6WEOcu8qteRea94fQfLZVzs22BZiLnsA459ymNRjDnuG80PwO6PUiBEdA1UuxX9KbbSsX8tuGlTRo3pZGl16GddrVztzlr0bCDW/5nk5R0mhrYhGRHCmYLeN6No5j6oAW2YKU2PO/9g4K4VidG4j7bSb9rD/mGcxaDMi051/d4HSmnYW/HvS6r1dbNlAtdSunzBBeSbs213auGeKvNx+gV5M4bF4uNJq2aCX/Tcq5/JhrZnaHGQ/A+n3HYZ93/T5+OpMRH653X/49uDpNjV00NPa6g9lz+/3iou10ql+Fi8qHMXZB7rPV5we/+QW+C67cRRNXesF1U1i0LSn39h9s4K3bHqN70nbYv46U925l1KHRnCI0W9sHZq/mp4vfo+qB7yEoDG6f604hsF/Sm99++ZbT/+wnrMJFNGjcEuv0rnBsJ8y7F/71IVicXwB5nUqxbQHmxwMxMT3mfQ17hvN5uWIE9JzoPn42YLcCV8EBiPvpD15r8xKtl98Fm+c6g/vWd+fwTOfPrykgBVUS6vqKiJRQCmYvAD0bx9GtUWy+/yFX6TAEfptJN8s6KpLMMaI9rjdwBsFLHu3Eh7/sZdxX+ZefuqnFRbStU4n9/5zm1e9+z7WdgYNRQZ8C8Km1F0fIf0bvwY82YszdSM/wv5mab2tI2Gdw3JFzdYf65wWz93SoTXyFcNbt+Yf5G/MqQ+VUu1IEGVkO9h8/zXZHTZpadtHQsodvHG2ytX172S7eXrYr33OeG/w2qBrN+K9zD3zjOEqtdRMA2NrgfrbtCWPC11vynFF+bP7vnOz0Ar0O/4uoE78zyfY2D2beR2vLDqpwnCTKs95Rl9dsU6h6YDWmNQTjtjlQuwNwbhAJcBEAccu28vJVr9JuSX/nwrzlL0HHx73POXbYOf3lY4SYJjnFi6YJaZvmEdZ9AliseQb4ty628U3rR2iweRIsehLimkF863yf93MVpLKHL8GvV221OYSISJ4UzF4grBaDtnVi8m5TrSknKlxKuX+2coP1J6bZe7uvc/33OqZPI0JsVhrEnQ10LThobfnNHQCtdjTAcSYd++aW8bStE4PdYfLRmr0cPJGW43/JPS1raWzZjRkcRcPr/w2zduT7mIIsBlkOk29T63AgpCKxHMsxAHKYcJAY6rTsyoPN4nn4440kJae7+xFGGjWMJAB+d8QTVy6UJ3o2xGoxqFslyqtg9vkbmgBw2zur2G7WAKChkfPUbrP4cqSczmLP0VS8mOD2IvA1ecH2DlGcZr2jLjeva45j3eZ8z3vidCajvjnMh8Z9zAn+D9daV9HBsolo4+zs9WkzmDAjg3QziB8av0y7aldRjrzTI/p/DQuuGkOTtU/Bj8+zNqMGw7+P8iqVwr77Z8JOH8w1FddiQNjpg+zb+B32Glfx7Bdb85zZvvO3NqxoeB3G9i+cM5j3LoPIKl4Fkb6mgLhu89yCLcSf3OR+P+yLbMa/+zbJsa1XgbIvdX1rt8+jnYhI2aRgVjyUa3cXfP0It9uWsNWsSRVO5PgfsmtxWbOUZTxrm0U145j7HAfMiozPHMimqA60ru3Me80rf9eKg5FnZmWNK4bTokEd4srtyTXwdc0QL3usM8dPZ/LR6r2M+34gU22TcZhkC2gN4MXMW7m6ThXa1a3EuL6XevTjEuNvLIbJETOao5Rjap9G7sDG9Tjz64vrccaVC+W35JoANLTsybHtvOFXYrUY/PznEfq/+0t+Q0KL+PKkZtjZcSglx+v7WZfQ0bqZdNPG65EPc0WFyhxLzeC3gzm3P1fjatGcojUfJ3Wkf9APROGZhhFmZGCa8E5Wb176pSL8sphaMeEcTM75+XAFkUM31+eHZncSsWkG9X9+mBrGBPaYsdnagjO3evaqPRw5mcH1x2cwLN9ew/99uowFeeZJn5nZTk5ndbPnaHN4Oxz5HT69i29bTGXs17
/nGUTmt2Axt/zn+R/+j09ss6gWfM77Ib0i4z8cCLcPc5/f60DZYYdfP/PiGSFbXd+iTI8oEakXIiJnKJgVT41vhm9GU8fxNx8F/8d92AythmF5EXDm5VktBlNa/E2zFZOznSKWY0yxTWZTi4s9/oPLLX+3f+Q66mf97Vws1PY+rxaujenTCFuQhcpRIbSqVZGXHa0ZnjmSMbZZVONsIGE3DayGyXXWFYRFjMyxH5dY/gZgl6VWttk2b/viepxj+jTiidlHAKhuHCGaVJKJyLHtFRfHeBUofzK8Hat3HeO2d5ylvs6dCc/CwjNB7wPwUtYt3HtTL9rWiWHlzqPu9nl5+ppG4LBTc/ZGTDP3NVI3Bv3MrND+JKXa2X007+oAJnAoOZ3LfunMnOCfaWX5nbdsr3JTxhiaWHZnm8E/nWkncedmRgfNoZt1fZ7ndjlurYDNMLzK3X564S4GXPwcA47fRdDu5ez+czSHsv7FFed8m7DmRAOGz17Pf29vTqO4aL75NdGLBYtpvPDNdq6qV5mqUSH8OH8aU2yTs7V1vR+emh9Mt0bOShOuQDmnbzVMLIxbsJVuQZuw/jDO693k7BFVcCUZFGV6RHGkXpy7mUnbulUCFigraBcpHQzTNIt5n9LASk5Oply5cpw4cYLo6Oj8b5CDzMxMFi5cSO/evbHZct4uttTatgA+viOHK878A+5aaOKww+TGmMkHcvxG2MTAiK4GI7dky+Pz+A8iIogrvumFcWwnXP0MdHjM3c7b/zTtDpOrXvyBgyfSMM4LDtLNID4Mfp4wIwNHy7uwXPuKO2KzO0xW/pmEZcEDtDv5LY42w7H0eiHHp8XXerrNPr2KOI7QL/3frDYb5lt71/mcZXu23cG16zHmNBMOsNMRx8DQ11k2uhtWi+HxnOQVKP/0xNWweznWWX1yfNznsg/8khNVr+DtZTv539K/8m0PEGf8wxfBT1PFOO5OWXA5YFbklcybaWrZRf+gH7Bidwa3po0w0vNMGdkzYBVYrF4F7C69LL8wNdhZI/eYGUlF46RHX8ZlDuRbh285tS4WHPwU8mC+qS6jq88Gi5Vlfxyhh2W188PXed9qvJfVnc7WTVxh2Q7AcTMcCxDJqVzziI8TwfyuS+nbvAZrdh/Lcdb3/NfUubx9fec2o+yPc/va1qWoAs6i3gnRl90KS7My/f/lBaa4x9KXeE0zs3KWe6FJTs7897XgfmdB/v3rIJdAFsDII4/PI393wwfOle9hFaGN5xfM3i5cO3f21MTCKsfZHbwM4KHM+3greDKWddOhYi33JgFWi0Gb2hU5fqZsgaXqpbk+Nd72xdXW3NgC/lzMS3U3k9KgIQ3adMQalP3t5m21ibxmwk0TLjYSeaNlors/Ps0opybl+rjPZU1NomJEMB0vqeJVMDvn7jaAwczpS3ks6GOPQBYgjmP8n+3ts7PB9XtjdhnHc29/wvOZk7KljLhK3r5uG8J/6lR2niOfme1KUSE81KUea3cf46c/2rM4YzndreupwEmPtrEcY6ptMsMzR/KD0Ya4cmHsPZZ/fdrL4suRlumgytHV2T5gnMtiQDWOkrHrZ1Y5GtHDspqpOczixnGMJ20fAZBu2phh78mUrD60tWzLMY3GNZtegVT2L3qVll9fg2HkuVQs1/JwBo4cZ6vP/UBVkNQLb/OOC5qjXBQBZ0H64hOVWRPxKwWzcla+C02AtBPOQvTeyqtsVlaGc/tZgKtGQkhUtibeLFyDvIPCG/vci3GyqnPFd8KzUL4GXHqDu01UmjPNgCo5b2Pra1/YtgBj7woAauz7AvZ9Ab/k/h+VV4Gyw07zrS9gGtnXRjmDF4PmW1+EbgPcsztel2XzdtvZM+28zyOOAYedi23f5xhduYLYTKxYBnyGtW5nrECn6+9ixIcZzhnoc1JGDhLD+Mw7uP6WuzzSOvIK2J+77lJ6No5jwBU1+WLDXprM351jOoXFcAbLY2zv0+u6IfS5LN6rme3Pzu
Q//57wJ/yc7zPIrJD/40hwdSqm7cUgez8MwxmgniaEr9vPo32jJsQeSmHk3Mgc02gSqcgWe216WNfxjO0DKhopTMq6leyvEidXesT9H66nXZ0YalQM599fbKV7LrPE4zMHMmZBCHUqR/Lzn0e8Sr14NWEHl8VXIMxm5en5v+aZTjFmwVZa14rBYZruxXy5pl4UMAg/lzezuAUJ2r09N+Aus3Z+6TkzOREjlzJr9qwszxJ4bXrk+OFY5EKld4Oc5WW9VupfA5UbwE8v59925RSoXB9im5w95vp6bfNcOL4XwivD5QWrAXquvIPC4fDPbvjlf876p1HVoHorjN++JjQr2fmfVKV6he5DQeuBWnHQ1rINrIfAUhVoB5zzleOZDxq+zoR7FSjXbOecFUpOzN7vM2cnupq7tqxPs757VlKVo7lWJwCwYQfr2X+KejaOg9uHccuCK7NXBbjFsyqA1wE7UPfUFuK8mD2td3oLVksN92O04uDyc4OlM7m+Y/o0wurIhM2fUW/r5Nwf4DmCzXSqpe/M8/kwDAgnnRsvdmCtVo4GsdG8uGgHi0+0JiG9VY6B3qPBC7nPPpsRQQsoz0meyboLINcqI9/8epBvztSAzm2W2JXrO/wkdHs13avHB/DGjzs9LueWTjEucyDfJremxYQE79qeaM2ouRtpVasClSNDePqLX/MMwsd9GVqgrbdX7zrmbpNbpZbEE2ms3nXM/eHWl9JzLHoiWyALzvewiYFxXpm1Dd++R7WV47iUo+62hxJiONB2DM17DMpxDPxeHq4Q7UWKg4JZOcvbGborhjsDm81z8giAzjiwDv7XHprcAp2fgoNbsn+9Zs9wpi744eu1PGdPezzvDJ53LITZN0JwOEEnnV+xGwBT2hTua76C1gPN7yvH08ed25d6I4cPJPnOKFuszvv6eKCzjzmFpz1f8Oiz10Gktx+Qzmt3Nghv6VVaR7dGsaz8M4nFy3+he/s2OS4aahjl3ba2DU+tBXsPejaOY17nI1RbOc4ZkJ9xiBgOXv4EzZJ3wetTIHn/mWfNOa2a0yI6hwnp4VUJGzQPNn8MK17Ltx/WM+kf+aXRANS54RlIvxzzq4e5PegHLjH2cZHlCHHGP+625+YF924cS4bdwa/7jjEmcxaQvQrIubPVy+2tiQoP4VBy/kFt42rRWK0WDp44zWUnl+caKLvSOlx5ynkF1a62X2xqzRebDuTbfoptMsNT4IffLqVbo9h80wae7NWAqDAbm/8+zvLfj7jPn2tg7WjNW8t2cuD4af45lcF/vt7u3Qyxjx9KN3z7Hs1WPOi60q2yeZTKKx5kA2QLaIsyR9mX0nPg+2K+ogzCpWzTArACKLMJ7WcWdeU7Q+da1OWehYQcA6Bek2DvStg678xhK5g57SV/3uKyopSRClOvhH9yqt1ayH7sWg7v5b5zmVuXMdBiEETE5D6T6woqq7Vwbslqz8jhRDkY9FXBa43mGFRf5Axkc3k+8v0PxdvnpDD9PiPf96W3fQEIj4GqTWDXEvdXyy7nXyayqvMDXmQs5vzhmJhn5j+dHICBgeF6XRXwOfEm8LD/Oh/z0zsJwpEtncKVc/yU7XH+8/gjWJN+JXH5LOJ25J82tLXbhzRo29vrRYVWi8HKP5KoObtNroviTBNSCOPoZSPAsFBxw5tEcyrXDwOHKc+kmu9wyhrNtoMpzDl1T74L7q5Kf42IEBtpmQ4yHWaeNbHPdW6gnFPe9rlBuKt9boHvmtCreOP25kQEW4n7dSpVVr+Yw7N3Xv/jr4A6V5O85HXKmSdzfU6SjBgqP/O7O+Xg3NSL1jl8m+BNjnJui/lcpedyK8V4/Tml51ztfQ2US8pCwaKcrS7N585vwsDffInXFMwWQJkNZiH/APX8QM+bAOjABvhuHPz1Yx53fF6gXFQcdnilEZzMbZvdQvRjy6fw2RDv21eo7exHZs5b7Hqo3BCS/4b0k3j1QaOg/L3C2tcPSIWQ7/vSXYEj0T
kDdh4TMGzhEBQKp3NPR3CzBME1L0Oz2yAoxHls2wLMRU9gnPN+MKMvwjj3/VCI5yTf/3wcdtJfqENw+j85BkCmCabFhgUTzLzr9J7LceO7WJre4nX1DQD7X8u8qpJREHZLCFZH/rPE/8p4xj2Tnd9Ma+Nq0XS4pDKN4yJo+XlHKptHcw2UDxkx/O+yz/ll9wlqJn2fZ+D7XNYdVDBSuNayiost3m/v7a1367yOrU5HgoMMXvxmB23Sf871ca4Nv4q3BrTEMAzumbWWo6m5f0iuEhXCgvuvItRmwTAMJr78Is9n/l+uj/Mp2+P856mn8tx6O69A2dv2Ja2ihs7th8oeuVAwmwcFs17wdYbOmwCoGGfo8lSU/fD23NHVnYGpt/q+CS0G+P5Bo6Qopn579b50L77BI6A1MZy96TcL6veGX6bC4mfyv9OcXifevB+K6jnxdfa5wsWwf03+bXs8D23vA7z8qvnQVvjqYdiX/6Yg1LwSHA7Yt9K7fvsg7aonmGW5kXXfzcl3prV3v3u47rKLYOcSeP+6/E9+/f9YmN6Yyxb2zXP2+dwPFWlmECYWQsnIdbb1OJFMy+pFB+tm2ljy3wnxk6z2vGu/ht/N6nS3rPVpRtlb3paeG1p+GpWiw/ll1zHSsxy5ni86NIhHe9Qn1GYlyDAY//U2jp/KeatxgIoRwUy6qQkOBzwxbzP/5NL2/G8HoGgDZZ075w8P/qLSXFI4jfo68zq9naGzWPMP/AqYO+l3RdkPbxdSjdwM6cnw02T4eXL+57WFOn836usMcnLMr809FSDgSlK/z/TFOK8vxvl9ifLyH+ecXifevB+K6jnx9nXb8wVnKTzTkf9sNcC3T8HOH6HbOHpadtIj9AmMjHNmn12bqvxdDZa/7MxL91anJ52/vQnC7/gC4po6c+zn5b9oNPSnFxli/S8DbZk5Vo9w5QX/n+0tMjb8CSt3Q9J27/o9fxi9Id/FfADJlVoQ3WEEm4JbM33WdKbaJmPmUnruycyhtOgxkANHV8Pm/PfDuyVoObcELecEUQSbaXk+zjG291lnvQI7VndA6E3qRWvLb16Vnos+vJZlh87mdOd27uS0LJ79Ymu+j83lWGoGQ2ety7edq6LGZeMXUzU6lAphNjbvP5HrKgaAp+ZtITTISnCQhac//zXPts9+sZUmF5UnyGrgcJj5bqftqnoBeF0hw2JAepaDsQvyPvfYBdvocEllrIbBWB+qb/hSrcOXfgc6X1nBrOTMm/+QfeFj+aciU5T98GUhVVgFqNvVu2D23L74+kGjpChJ/famL8Xxei2K58Tb/lRt7Ix4DOdr1vh44JnZ6Rxmq+t0gV1L4M8E5w85lIdLPnDeZisGNOwLe1dA6hG8qZLh1QfB2u2dz0/jm+C7MXkH4UGhEBSKNe041jz+n7UYEM1p2PNV7o1yYouAzFSvmkZ2GAFNb6GVw2RkVAdGpJBr6bnNUR2Y0v5icNTk0Oan80x3SDXCiazdBuPvXyiXmZJnYO0KON/r4iA5tjW3vbMq39SLT+6oR4uTS0j/aTbkvzs2T9bYxoKYdkxbdzzfczetHk2lyFD2/3PavVV3XoF1jYphGBjsOVP/Oa+2KWlZpKR51pLOrf2xU5kMnrnGq7ZJKelc+eIP2Z/bPKpeXPLMQqyGhQy7I9+29Z5e6P5QkxcTOJicRqNnv/WqbeKJNLq/spSKkcGczrB7VWLvtrdX4jDxqu25lT0CRcGsFA8fyz+V2n74MuNW0L74+4NGcSlJ/c6vL8X1evX3c1KQfnszW310pzPvffsX+feh2e3QfhRUqndOOoUXVTJ8qahh8SIIv/EdaHCt8wPj9+Py7/elN0LTfhBTD2b18S6n+a8lzsoo+bBEOWe5zlamSCMhvVWOJd+musraWYI40HYMlVc8mOsGIn+2fcFZzcCeiWPZy1iWTsy3Lw3/nosZF8UdkWsYlzk52/WxHON/tsn8ZrmYBp/txXBkEZ7vWZ2aHZxHk0
MLuNFWjUaWvdmevXMrUwzu9aDH1tv5Bb8v3tQMwKu2k25qSvWKYSzeepCZK/bk275a+TCy7A6SUtLzbWsxwDCcM7Mm+edi2x1gxxnI5tc2p0DW20WL+bXdeSSVnUdSvW6/evc/XrdNSsk94C0uypktgDKfM1tUSkrOZ3H0w9uFVCXlOSkD/P6+LK1jU9B+5/eaLWi+uS85+L7m63vTviD99vY5zHdRYc7bevuyoMZVZ/bc8nAHiSHx/DqzvuRLux7VeTm9OYprBo1vJm356wSfPpzrLLHdFomtQjwczjtN4/wqDHaHydPPP8/zmZOAvBeXAV63tVoMVu48ysxpr+ebRzx4iLP8mbdtXUG4N+37/ute7A6Trz9+24u2w2hzcUU27j3O0Flr8w1+pw1qhcM0uXvWunzbPtbjEmpXimTrgRO8+ePOfNvfdWUtDAOm/bQ737Zz7r6iSGZmtQAsDwpmA6wA5Z/KdD9KWl9KsSJ5X5bWsSmKfntbreOmadDkZs9jvlTJcNjJ+msZG5d/y2XtexB0cYe8Uy/yO3dBq0d4+xx6s6gwh+fcp7JI3uwA5k21jtDycHFn2LXUu4od1/8PLrvN/TjNMzuX5Vl6buNHMP/e/M9drgZUaQjlqpO5aS5BGbmXIEsPjyXssW0AnP6/RoScOphrUO1ua7FiT0vl2AuXEmP+k2v7Y0Z5Koz4DixW/nnjairm0dYjCM/K4siES/JMA3G1B7xuW9QBflGe298UzOZBwWwJ4O/yT4Xoh0//aRZxX0rEc1KKFdn7srSOjb/7XZJqBvuqqGarzz1/SfjQ421gvfkTmDc0//Od/8HEm9JzvpYo9FZUNbAGw/Hd+bcNjoSsdHDkXiGhwMIrObded9jhxN7829fq4Jz+3rU0/7aNrndutw5krZ6GNTM11xJ7WcHR2Lo9C5YgMr59FltGcq5tM0PKE9z7TAqKI4uMb57Os31WcDS2q0cDkPnDRIIyUvL/oFEE/yaqmoGUbCUld9Jixax5Ffu3JtOs5lWBDVBKynMi2ZXWsSkJ+bglRUGrR3j7HJaUBY5eV+uI9e585y8obNQX47zHaRR08WTX8RAaDX8s9q76RcqB/Nu4ZJzMv825rKHODX28CX5PHXH+eGv3Mu/bbpvv/jMIcl3QZxhgy0yGhY8CEJxP2+CM4zB/uPtYfu1tmcnOCiaALY+2FgPCTh/Mto16ICiYFRGR/BVg2+MSpagDzpLyocebx1mYDyb+WjzZ7n7nuWLqehfM9nzRuRNiwr/zb3v9VKjdEQ5uhjn/yr/9gDPbhXvzzcO1r0KVS2H/WnfAl6fL7wZMWPNu/m0b3+R8bpJ+c1cOyVO15s4azQc35d+2SiPntwUnDzmfl/xUvxww4O/V+bct6pKaXlAwKyIi3ilJNYMLoqQEnEUtv8dZlB9MfD23t8Fv6zN1hX+Zmn/bprc6zx8V61vQ7k3bFoOc567eCla+kX/7Xme2L96xMP+2N77jPPeu5d4Fs92ec/72JgjvNcn5mvBl23Vvz13UJTW9kHN9BxERkZw06gsjf3Xmxt40zfl75JaSH8iKJ9cHk+jzNgiJrlb4Sh2+nNsV/AI5VDB2/nIFv760Lc3ndgX4uRYONpyzrDXb+da2qM8dQApmRUTEN66ZvyY3n93IQEqfovxgcubcWQPms7bmcLIGzM/93L4Ev74G4aXx3KU1CA8gpRmIiIhcqIoy9cKXRba+5DT7mv9cGs/tS0qPr+k/RXnuAFEwKyIiIoHnS2DtaxBeGs9dwoLwElPKMgcKZkVERERKohIUhJeYUpY5UM6siIiIiJRaCmZFREREpNRSMCsiIiIipZaCWREREREptRTMioiIiEippWBWREREREqtC640l2k690VOTk4u8DkyMzM5deoUycnJ2Gw2f3VNAkBjWXZoLMsOjWXZobEsO4p7LF1xmituy8sFF8ympKQAEB8fH+CeiIiIiEheUlJSKFeuXJ5tDNObkLcMcTgcHD
hwgKioKAzj/L2GvZOcnEx8fDz79u0jOjrazz2U4qSxLDs0lmWHxrLs0FiWHcU9lqZpkpKSQrVq1bBY8s6KveBmZi0WC9WrV/fLuaKjo/XmLCM0lmWHxrLs0FiWHRrLsqM4xzK/GVkXLQATERERkVJLwayIiIiIlFoKZgsgJCSEMWPGEBISEuiuSCFpLMsOjWXZobEsOzSWZUdJHssLbgGYiIiIiJQdmpkVERERkVJLwayIiIiIlFoKZkVERESk1FIwKyIiIiKlloLZApgyZQq1a9cmNDSUli1bsnz58kB3SfKxbNky+vTpQ7Vq1TAMg/nz53tcb5omY8eOpVq1aoSFhdGpUye2bt0amM5KriZOnMjll19OVFQUVapU4frrr2fHjh0ebTSWpcPUqVNp2rSpuwB727Zt+eabb9zXaxxLr4kTJ2IYBiNHjnQf03iWDmPHjsUwDI+f2NhY9/UldRwVzPpo7ty5jBw5kqeffpoNGzbQvn17evXqxd69ewPdNclDamoqzZo144033sjx+kmTJvHKK6/wxhtvsGbNGmJjY+nWrRspKSnF3FPJy9KlS7nvvvtYtWoVCQkJZGVl0b17d1JTU91tNJalQ/Xq1XnhhRdYu3Yta9eu5eqrr+a6665z/8eocSyd1qxZw9tvv03Tpk09jms8S49LL72UxMRE98+WLVvc15XYcTTFJ61btzaHDRvmcaxBgwbm6NGjA9Qj8RVgfv755+7LDofDjI2NNV944QX3sbS0NLNcuXLm//73vwD0ULyVlJRkAubSpUtN09RYlnYVKlQw3333XY1jKZWSkmLWq1fPTEhIMDt27Gg+9NBDpmnqfVmajBkzxmzWrFmO15XkcdTMrA8yMjJYt24d3bt39zjevXt3VqxYEaBeSWHt2rWLgwcPeoxrSEgIHTt21LiWcCdOnACgYsWKgMaytLLb7Xz00UekpqbStm1bjWMpdd9993HNNdfQtWtXj+Maz9Lljz/+oFq1atSuXZt//etf/PXXX0DJHseggN57KXPkyBHsdjtVq1b1OF61alUOHjwYoF5JYbnGLqdx3bNnTyC6JF4wTZNRo0Zx1VVX0bhxY0BjWdps2bKFtm3bkpaWRmRkJJ9//jmNGjVy/8eocSw9PvroI9avX8+aNWuyXaf3ZenRpk0bZs2axSWXXMKhQ4eYMGEC7dq1Y+vWrSV6HBXMFoBhGB6XTdPMdkxKH41r6XL//fezefNmfvrpp2zXaSxLh/r167Nx40aOHz/OZ599xqBBg1i6dKn7eo1j6bBv3z4eeughFi9eTGhoaK7tNJ4lX69evdx/N2nShLZt21KnTh3ee+89rrjiCqBkjqPSDHxQqVIlrFZrtlnYpKSkbJ9UpPRwrdTUuJYeDzzwAAsWLODHH3+kevXq7uMay9IlODiYunXr0qpVKyZOnEizZs147bXXNI6lzLp160hKSqJly5YEBQURFBTE0qVLef311wkKCnKPmcaz9ImIiKBJkyb88ccfJfp9qWDWB8HBwbRs2ZKEhASP4wkJCbRr1y5AvZLCql27NrGxsR7jmpGRwdKlSzWuJYxpmtx///3MmzePH374gdq1a3tcr7Es3UzTJD09XeNYynTp0oUtW7awceNG90+rVq3o378/Gzdu5OKLL9Z4llLp6els376duLi4kv2+DNjSs1Lqo48+Mm02mzlt2jRz27Zt5siRI82IiAhz9+7dge6a5CElJcXcsGGDuWHDBhMwX3nlFXPDhg3mnj17TNM0zRdeeMEsV66cOW/ePHPLli3mbbfdZsbFxZnJyckB7rmca/jw4Wa5cuXMJUuWmImJie6fU6dOudtoLEuHJ5980ly2bJm5a9cuc/PmzeZTTz1lWiwWc/HixaZpahxLu3OrGZimxrO0eOSRR8wlS5aYf/31l7lq1Srz2muvNaOiotwxTkkdRwWzBfDmm2+aNWvWNIODg80WLVq4ywJJyfXjjz+aQLafQYMGmabpLDkyZswYMz
Y21gwJCTE7dOhgbtmyJbCdlmxyGkPAnDFjhruNxrJ0uOuuu9z/jlauXNns0qWLO5A1TY1jaXd+MKvxLB1uvfVWMy4uzrTZbGa1atXMG2+80dy6dav7+pI6joZpmmZg5oRFRERERApHObMiIiIiUmopmBURERGRUkvBrIiIiIiUWgpmRURERKTUUjArIiIiIqWWglkRERERKbUUzIqIiIhIqaVgVkRERERKLQWzIiIXMMMwmD9/fqC7ISJSYApmRUQCZPDgwRiGke2nZ8+ege6aiEipERToDoiIXMh69uzJjBkzPI6FhIQEqDciIqWPZmZFRAIoJCSE2NhYj58KFSoAzhSAqVOn0qtXL8LCwqhduzaffPKJx+23bNnC1VdfTVhYGDExMdxzzz2cPHnSo8306dO59NJLCQkJIS4ujvvvv9/j+iNHjnDDDTcQHh5OvXr1WLBgQdE+aBERP1IwKyJSgv373//mpptuYtOmTQwYMIDbbruN7du3A3Dq1Cl69uxJhQoVWLNmDZ988gnfffedR7A6depU7rvvPu655x62bNnCggULqFu3rsd9jBs3jn79+rF582Z69+5N//79OXbsWLE+ThGRgjJM0zQD3QkRkQvR4MGDmT17NqGhoR7Hn3jiCf79739jGAbDhg1j6tSp7uuuuOIKWrRowZQpU3jnnXd44okn2LdvHxEREQAsXLiQPn36cODAAapWrcpFF13EnXfeyYQJE3Lsg2EYPPPMMzz33HMApKamEhUVxcKFC5W7KyKlgnJmRUQCqHPnzh7BKkDFihXdf7dt29bjurZt27Jx40YAtm/fTrNmzdyBLMCVV16Jw+Fgx44dGIbBgQMH6NKlS559aNq0qfvviIgIoqKiSEpKKuhDEhEpVgpmRUQCKCIiItvX/vkxDAMA0zTdf+fUJiwszKvz2Wy2bLd1OBw+9UlEJFCUMysiUoKtWrUq2+UGDRoA0KhRIzZu3Ehqaqr7+p9//hmLxcIll1xCVFQUtWrV4vvvvy/WPouIFCfNzIqIBFB6ejoHDx70OBYUFESlSpUA+OSTT2jVqhVXXXUVH3zwAatXr2batGkA9O/fnzFjxjBo0CDGjh3L4cOHeeCBB7jjjjuoWrUqAGPHjmXYsGFUqVKFXr16kZKSws8//8wDDzxQvA9URKSIKJgVEQmgRYsWERcX53Gsfv36/Pbbb4Cz0sBHH33EiBEjiI2N5YMPPqBRo0YAhIeH8+233/LQQw9x+eWXEx4ezk033cQrr7ziPtegQYNIS0vj1Vdf5dFHH6VSpUrcfPPNxfcARUSKmKoZiIiUUIZh8Pnnn3P99dcHuisiIiWWcmZFREREpNRSMCsiIiIipZZyZkVESihlgYmI5E8zsyIiIiJSaimYFREREZFSS8GsiIiIiJRaCmZFREREpNRSMCsiIiIipZaCWREREREptRTMioiIiEippWBWREREREqt/wftc5Sr8UXCswAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAq8AAAGHCAYAAACedrtbAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy81sbWrAAAACXBIWXMAAA9hAAAPYQGoP6dpAAB1A0lEQVR4nO3dd3hTZfsH8O9Jmqa7pUBpyyhlW5YgiCxB2VPByVAQnOBAXxXHq4AL1FfEHyi8KvNVwMEQFYEqskGWyCgCMspo2dCWzrQ5vz+eJm2a0YyT2e/nunolOTk5eZKTwt0793M/kizLMoiIiIiI/IDK2wMgIiIiIrIXg1ciIiIi8hsMXomIiIjIbzB4JSIiIiK/weCViIiIiPwGg1ciIiIi8hsMXomIiIjIbzB4JSIiIiK/weCViIiIiPwGg1eiADBkyBCEhobi+vXrVvcZMWIENBoNLly4YPdxJUnC5MmTjbc3bNgASZKwYcOGSh87evRo1K9f3+7nKu+zzz7DggULzLafOnUKkiRZvM/dJk+eDEmSoFKpcOLECbP7c3NzERUVBUmSMHr0aIvHOHDgACRJgkajQWZmpsV9unfvDkmSLP7Y+34eP34cWq0W27dvt/flKeK3335DREQEzp07Z9f+o0ePRkREhJtHpcx76k6Gz9bly5e9PRQivxDk7QEQkevGjh2LlStXYvHixRg3bpzZ/VlZWVixYgUGDhyIWrVqOf08bdu2xfbt25GSkuLKcCv12WefoUaNGmZBYEJCArZv346GDRu69fltiYiIwPz58/H222+bbP/uu++g0+mg0WisPvbLL78EABQXF2PRokWYOHGixf0aNGiAr7/+2my7Vqu1a4wvvvgievXqhY4dO9q1v1J69OiBW2+9Fa+99hoWLlzo0eeujKvvKRH5DgavRAGgX79+SExMxLx58ywGr0uWLEF+fj7Gjh3r0vNERUXhtttuc+kYrtBqtV59fgB44IEHsHDhQkyZMgUqVdmXV3PnzsWQIUOwatUqi48rLCzE119/jdatW+Py5cuYN2+e1eA1NDTU6dd5+PBhrFy5EmvWrHHq8a4aP348HnjgAbzzzjuoW7euV8ZgiSvvKRH5FpYNEAUAtVqNUaNGYc+ePThw4IDZ/fPnz0dCQgL69euHS5cuYdy4cUhJSUFERATi4uJw5513YvPmzZU+j7WygQULFqBp06bQarW46aabsGjRIouPnzJlCjp06IDY2FhERUWhbdu2mDt3LmRZNu5Tv359HDp0CBs3bjT7atda2cCWLVvQo0cPREZGIiwsDJ06dcLPP/9sNkZJkvD777/jqaeeQo0aNVC9enUMHToUGRkZlb52gzFjxuDMmTNITU01bjt69Ci2bNmCMWPGWH3cypUrceXKFTz66KMYNWqU8TFKmz17NuLj49GrVy+z+9asWYMePXogOjoaYWFhuOmmmzB16lTj/Yav8f/++2/06dMH4eHhSEhIwLRp0wAAO3bsQJcuXRAeHo4mTZpYzK4OGjQIERER+OKLL+we86FDh9CjRw+Eh4ejZs2aePrpp5GXl2e8v0ePHmjWrJnJ5wQAZFlGo0aNMGDAALufyxbDZyQ1NRWPPPIIYmNjER4ejkGDBlksFZk3bx5at26NkJAQxMbGYsiQITh8+LDZfn/88QcGDRqE6tWrIyQkBA0bNsSECRPM9rtw4QKGDRuG6Oho1KpVC2PGjEFWVpYir40okDB4JQoQY8aMgSRJmDdvnsn2tLQ07Ny5E6NGjYJarcbVq1cBAJMmTcLPP/+M+fPno0GDBujevbtdtawVLViwAI888ghuuukmLFu2DP/+97/x9ttvY/369Wb7njp1Ck888QS+/fZbLF++HEOHDsUzzzxj8hX8ihUr0KBBA7Rp0wbbt2
/H9u3bsWLFCqvPv3HjRtx5553IysrC3LlzsWTJEkRGRmLQoEH45ptvzPZ/9NFHodFosHjxYnzwwQfYsGEDRo4caffrbdy4Mbp27WryPs+bNw/169dHjx49rD5u7ty50Gq1GDFihPFczZ071+r+xcXFZj96vb7S8f3888+4/fbbTbLChufv378/9Ho95syZgx9//BHPPvsszp49a7KfTqfD0KFDMWDAAPzwww/o168fXn31Vbz22msYNWoUxowZgxUrVqBp06YYPXo09uzZY/L44OBgi388WKPT6dC/f3/06NEDK1euxNNPP43//ve/eOCBB4z7PPfcczhy5Ah+++03k8f+8ssvOH78OMaPH2/Xc9n7no4dOxYqlQqLFy/GjBkzsHPnTnTv3t2kpnzq1KkYO3YsmjdvjuXLl+OTTz7B/v370bFjRxw7dsy439q1a9G1a1ecPn0a06dPxy+//IJ///vfFmvP77nnHjRp0gTLli3DK6+8gsWLF+P555+367URVSkyEQWMbt26yTVq1JCLioqM2/71r3/JAOSjR49afExxcbGs0+nkHj16yEOGDDG5D4A8adIk4+3ff/9dBiD//vvvsizLcklJiZyYmCi3bdtW1uv1xv1OnTolazQaOSkpyepYS0pKZJ1OJ7/11lty9erVTR7fvHlzuVu3bmaPOXnypAxAnj9/vnHbbbfdJsfFxck5OTkmr6lFixZynTp1jMedP3++DEAeN26cyTE/+OADGYCcmZlpdayyLMuTJk2SAciXLl2S58+fL2u1WvnKlStycXGxnJCQIE+ePFmWZVkODw+XR40aZfLYU6dOySqVSn7wwQeN27p16yaHh4fL2dnZJvt269ZNBmDxZ+zYsTbHeOHCBRmAPG3aNJPtOTk5clRUlNylSxeT97miUaNGyQDkZcuWGbfpdDq5Zs2aMgB57969xu1XrlyR1Wq1/MILL5gd5/XXX5dVKpV848YNm+M1PN8nn3xisv3dd9+VAchbtmyRZVl8Vho0aCDfddddJvv169dPbtiwoc3XJMv2v6eGz0jF34OtW7fKAOR33nlHlmVZvnbtmhwaGir379/fZL/Tp0/LWq1WHj58uHFbw4YN5YYNG8r5+flWx2f4bH3wwQcm28eNGyeHhIRU+vqIqhpmXokCyNixY3H58mVj3WVxcTG++uordO3aFY0bNzbuN2fOHLRt2xYhISEICgqCRqPBb7/9ZvErT1uOHDmCjIwMDB8+HJIkGbcnJSWhU6dOZvuvX78ePXv2RHR0NNRqNTQaDd58801cuXIFFy9edPj15ubm4o8//sC9995rMmtdrVbjoYcewtmzZ3HkyBGTxwwePNjkdqtWrQAA6enpdj/vfffdh+DgYHz99ddYvXo1zp8/b7XDACDKNvR6vUlZwZgxY5Cbm2sxO9ywYUPs2rXL7OeNN96wOS5D+UNcXJzJ9m3btiE7Oxvjxo0zOU+WSJKE/v37G28HBQWhUaNGSEhIQJs2bYzbY2NjERcXZ/F9i4uLg16vx/nz520+l8GIESNMbg8fPhwA8PvvvwMAVCoVnn76afz00084ffo0ANFRYc2aNXa9JsCx97TieDp16oSkpCTjeLZv3478/Hyzc163bl3ceeedxgzx0aNHcfz4cYwdOxYhISGVjtHSZ7OgoMCp3w2iQMbglSiA3HvvvYiOjsb8+fMBAKtXr8aFCxdMJmpNnz4dTz31FDp06IBly5Zhx44d2LVrF/r27Yv8/HyHnu/KlSsAgPj4eLP7Km7buXMnevfuDQD44osvsHXrVuzatQuvv/46ADj83ABw7do1yLKMhIQEs/sSExNNxmhQvXp1k9uG2eaOPH94eDgeeOABzJs3D3PnzkXPnj2RlJRkcV+9Xo8FCxYgMTERt9xyC65fv47r16+jZ8+eCA8Pt1g6EBISgnbt2pn9WHsOA8NrqBgoXbp0CQBQp06dSl9bWFiY2eODg4MRGxtrtm9wcDAKCgosjr/8eG
wJCgoyOyeGz075czdmzBiEhoZizpw5AIBPP/0UoaGhNuuMK47J3vfU2ufZMB7DpbXPneF+R953QJnPJlFVwG4DRAEkNDQUw4YNwxdffIHMzEzMmzcPkZGRuO+++4z7fPXVV+jevTtmz55t8ticnByHn8/wn62lDFvFbUuXLoVGo8FPP/1kEhytXLnS4ec1qFatGlQqlcWeqYYsZI0aNZw+vi1jxozBl19+if3791tswWTw66+/GrOTFYMTQEyCSktLU6T9mOG1GuqaDWrWrAkAZvWt7mJ4fnve++LiYly5csXkvTF8dspvi46OxqhRo/Dll1/ixRdfxPz58zF8+HDExMQoO3hY/zw3atTIZFzWPneG1+3p952oqmDmlSjAjB07FiUlJfjwww+xevVqPPjggwgLCzPeL0mSWW/L/fv3O9XQvmnTpkhISMCSJUtMZoKnp6dj27ZtJvtKkoSgoCCo1Wrjtvz8fPzvf/8zO65Wq7Ur2xQeHo4OHTpg+fLlJvvr9Xp89dVXqFOnDpo0aeLw67JHx44dMWbMGAwZMgRDhgyxut/cuXOhUqmwcuVK/P777yY/htdecZKds5KSkhAaGorjx4+bbO/UqROio6MxZ84csxn77nDixAlUr17d7p7CFYP/xYsXAxCLC5T37LPP4vLly7j33ntx/fp1PP3004qMt7LxbNu2Denp6cbxdOzYEaGhofjqq69M9jt79izWr19vnLjXpEkTNGzYEPPmzUNhYaFbxkpUFTHzShRg2rVrh1atWmHGjBmQZdmst+vAgQPx9ttvY9KkSejWrRuOHDmCt956C8nJySguLnbouVQqFd5++208+uijGDJkCB577DFcv34dkydPNvvqdcCAAZg+fTqGDx+Oxx9/HFeuXMF//vMfi03iW7ZsiaVLl+Kbb75BgwYNEBISgpYtW1ocw9SpU9GrVy/ccccdePHFFxEcHIzPPvsMBw8exJIlS+yqh3SWrW4BgPh6+YcffkCfPn1w1113Wdzn448/xqJFizB16lTjAgf5+fnYsWOHxf1t9SoNDg5Gx44dzR4bERGBjz76CI8++ih69uyJxx57DLVq1cI///yDv/76C7NmzbL5Ohy1Y8cOdOvWza73Pjg4GB999BFu3LiB9u3bY9u2bXjnnXfQr18/dOnSxWTfJk2aoG/fvvjll1/QpUsXtG7d2u4xOfKe7t69G48++ijuu+8+nDlzBq+//jpq165t7KEcExODN954A6+99hoefvhhDBs2DFeuXMGUKVMQEhKCSZMmGY/16aefYtCgQbjtttvw/PPPo169ejh9+jTWrl1rM2NPRDZ4d74YEbnDJ598IgOQU1JSzO4rLCyUX3zxRbl27dpySEiI3LZtW3nlypXyqFGjzLoDoJJuAwZffvml3LhxYzk4OFhu0qSJPG/ePIvHmzdvnty0aVNZq9XKDRo0kKdOnSrPnTtXBiCfPHnSuN+pU6fk3r17y5GRkTIA43EsdRuQZVnevHmzfOedd8rh4eFyaGiofNttt8k//vijyT6GmeS7du0y2W7tNVVUvtuALeW7DcyYMUMGIK9cudLq/nPmzDGZ4W9rZjwAWafT2Xz+uXPnymq1Ws7IyDC7b/Xq1cYuB2FhYXJKSor8/vvvG+8fNWqUHB4ebva4bt26yc2bNzfbnpSUJA8YMMBk2z///GPWscAaw/Pt379f7t69uxwaGirHxsbKTz31lNVOBQsWLJAByEuXLq30+OXHb897aviMrFu3Tn7ooYfkmJgYY1eBY8eOmR33yy+/lFu1aiUHBwfL0dHR8l133SUfOnTIbL/t27fL/fr1k6Ojo2WtVis3bNhQfv755433W/tsGcZT/neDiGRZkmUPfIdEREQeUVBQgHr16uFf//qX1RW83OmNN97AokWLcPz4cQQFKf/l3j333IMdO3bg1KlTNpfidYahZ/GuXbvQrl07RY9NRMphzSsRUQAJCQnBlClTMH36dOTm5nr0ua9fv4
5PP/0U7733nqKBa2FhIbZv345PPvkEK1aswEsvvaR44EpE/oM1r0REAebxxx/H9evXceLECau1wu5w8uRJvPrqq8Y+rUrJzMxEp06dEBUVhSeeeALPPPOMoscnIv/CsgEiIiIi8hssGyAiIiIiv8HglYiIiIj8BoNXIiIiIvIbAT9hS6/XIyMjA5GRkW5tVk5EREREzpFlGTk5OUhMTIRKZTu3GvDBa0ZGBurWrevtYRARERFRJc6cOYM6derY3Cfgg9fIyEgA4s2Iiopy6hg6nQ7r1q1D79692VvQz/FcBg6ey8DBcxk4eC4Dh6fPZXZ2NurWrWuM22wJ+ODVUCoQFRXlUvAaFhaGqKgo/jL6OZ7LwMFzGTh4LgMHz2Xg8Na5tKfEkxO2iIiIiMhvMHglIiIiIr/B4JWIiIiI/EbA17wSEREFOlmWUVxcjJKSEq+OQ6fTISgoCAUFBV4fC7lG6XOpVqsRFBSkSNtSBq9ERER+rKioCJmZmcjLy/P2UCDLMuLj43HmzBn2Vvdz7jiXYWFhSEhIQHBwsEvHYfBKRETkp/R6PU6ePAm1Wo3ExEQEBwd7NWjU6/W4ceMGIiIiKm00T75NyXMpyzKKiopw6dIlnDx5Eo0bN3bpmAxelaQvAdK3ATcuABG1gKROgErt7VEREVGAKioqgl6vR926dREWFubt4UCv16OoqAghISEMXv2c0ucyNDQUGo0G6enpxuM6i8GrUtJWAWsmAtkZZduiEoG+7wMpg703LiIiCngMFMkfKPU55addCWmrgG8fNg1cASA7U2xPW+WdcREREREFGAavrtKXiIwrZAt3lm5b84rYj4iIiIhcwuDVVenbzDOuJmQg+5zYj4iIyFfpS4CTm4ED34tLP0i6dO/eHRMmTPD2MOx25MgRxMfHIycnBwCwYMECxMTEeHwco0ePxt13363oMWfNmoXBgz1TJsng1VU3Lii7HxERkaelrQJmtAAWDgSWjRWXM1q4rext0KBB6Nmzp8X7tm/fDkmSsHfvXpefZ8GCBZAkCTfddJPZfd9++y0kSUL9+vXN7svPz0e1atUQGxuL/Px8s/vr168PSZLMfqZNm2ZzPK+//jrGjx+PyMhIp19T/fr1MWPGDKcf7y6PPfYYdu3ahS1btrj9uRi8uiqilrL7EREReZIX5m2MHTsW69evR3p6utl98+bNw80334y2bdsq8lzh4eG4ePEitm/fbvY89erVs/iYZcuWoUWLFkhJScHy5cst7vPWW28hMzPT5OeZZ56xOo6zZ89i1apVeOSRR5x/MXZasGABunfv7vbnKU+r1WL48OGYOXOm25+LwaurkjqJrgKw1ldPAqJqi/2IiIjcTZaBolz7fgqygV9ehu15GxPFfvYcT7Z0HHMDBw5EXFwcFixYYLI9Ly8P33zzDcaOHYsrV65g2LBhqFOnDsLCwtCyZUssWbLE4bcjKCgIw4cPx7x584zbzp49iw0bNmD48OEWHzN37lyMHDkSI0eOxNy5cy3uExkZifj4eJOf8PBwq+P49ttv0bp1a9SpU8fqPsePH8ddd92FWrVqISIiAu3bt8evv/5qvL979+5IT0/H888/b8z2KqGwsBDPPvss4uLiEBISgi5dumDXrl3G+zds2ABJkvDbb7+hXbt2CAsLQ6dOnXDkyBGT4wwePBgrV660mK1WEltluUqlFu2wvn0YIoAt/4tb+qHqO439XomIyDN0ecB7iQodTBYZ2Wl17dv9lbN27RYUFISHH34YCxYswJtvvmkMwr777jsUFRVhxIgRyMvLwy233IKJEyciKioKP//8Mx566CE0aNAAHTp0cOhVjB07Frfffjs++eQThIWFYcGCBejbty9q1TL/VvT48ePYvn07li9fDlmWMWHCBJw4cQINGjRw6Dkr2rRpE9q1a2dznxs3bqB///545513EBISgoULF2LQoEE4cuQI6tWrh+XLl6N169Z4/PHH8dhjj7k0nvJefvllLF
u2DAsXLkRSUhI++OAD9OvXD3v27EFUVJRxv9dffx0fffQRatasiSeffBJjxozB1q1bjfe3a9cOOp0OO3fuRLdu3RQbX0XMvCohZTBw/yIgKsF0e1Si2M4+r0RERCbGjBmDU6dOYcOGDcZt8+bNw9ChQ1GtWjXUrl0bL774Im6++WY0aNAAzzzzDPr06YPvvvvO4ee6+eab0bBhQ3z//feQZRkLFizAmDFjLO47b9489OvXz1jz2rdvX5OsrcHEiRMRERFh8lP+tVR06tQpJCba/qOidevWeOKJJ9CyZUs0btwY77zzDho0aIBVq0TpRmxsLNRqtUnW11W5ubmYPXs2PvzwQ/Tr1w8pKSn44osvEBoaiv/9738m+7777rvo1q0bUlJS8Morr2Dbtm0oKCgw3h8eHo6YmBicOnXK5XHZwsyrUlIGA80GAKlvAttnAXXaA2PWMuNKRESepQkDXrPVBaec9G3A1/dWvt+I7+0rf1OHAAU5dj11s2bN0KlTJ8ybNw933HEHjh8/js2bN2PdunUAgJKSEkybNg3ffPMNzp07h8LCQhQWFtr8at6WMWPGYP78+ahXr54xwzlr1iyTfUpKSrBw4UJ88sknxm0jR47E888/jylTpkCtLvs//aWXXsLo0aNNHl+7dm2rz5+fn1/pqlK5ubmYMmUKfvrpJ2RkZKC4uBj5+fk4ffq0zcedPn0aKSkpxtvFxcXQ6XSIiIgweR1z5swxe+zx48eh0+nQuXNn4zaNRoP27dvj6NGjJvu2atXKeD0hQSTsLl68aFI7HBoairy8PJvjdRWDVyWp1EC920TwarhNRETkSZIEBNsZ4DW8U3xLmJ0Jy3Wvkri/4Z32/Z+m1zsyUowdOxZPP/00Pv30U8yfPx9JSUno0aMHAOCjjz7Cxx9/jBkzZqBly5YIDw/HhAkTUFRU5NBzGIwYMQIvv/wyJk+ejIcffhhBQeYh0Nq1a3Hu3Dk88MADJttLSkqwbt069OvXz7itRo0aaNSokd3PX6NGDVy7ds3mPi+99BLWrl2L//znP2jUqBFCQ0Nx7733VvqaExMTsW/fPuPt5cuXY9myZfj666+N28p//V+eXFqnXLF+VpZls20ajcZ43XCfvsI5v3r1KmrWrGlzvK5i2YDSwqqLy7yr3h0HERFRZQzzNgCYTzx2/7yN+++/H2q1GosXL8bChQvxyCOPGIOizZs346677sLIkSPRunVrNGjQAMeOHXP6uWJjYzF48GBs3LjRasnA3Llz8eCDD2Lfvn0mPyNGjLA6cctebdq0QVpams19Nm/ejNGjR2PIkCFo2bIl4uPjzb6CDw4ORkmJaQ/eoKAgNGrUyPgTFxeH0NBQs22WNGrUCMHBwSYtrnQ6Hfbs2YMmTZo49BqPHz+OgoICtGnTxqHHOYrBq9KMwesV746DiIjIHl6ctxEREYEHHngAr732GjIyMky+hm/UqBFSU1Oxbds2HD58GE888QTOnz/v0vMtWLAAly9fRrNmzczuu3TpEn788UeMGjUKLVq0MPkZNWoUVq1ahUuXLhn3z8nJwfnz501+srOzrT53nz59sH37drPAs7xGjRph+fLl2LdvH/766y8MHz7cLLNZv359bNq0CefOncPly5edeBdMhYeH46mnnsJLL72ENWvWIC0tDY899hjy8vLw0EMPOXSszZs3o0GDBmjYsKHL47KFwavSQmPFZcF1oKTYq0MhIiKyS8pgYMJBYNRPwD1zxeWEAx6ZcDx27Fhcu3YNPXv2NKmdfOONN9C2bVv06dMH3bt3R3x8vMurQoWGhqJ69eoW71u0aBHCw8ONZQvl3XHHHYiMjDSZwPTmm28iISHB5Ofll1+2+tz9+/eHRqMxaX1V0ccff4xq1aqhU6dOGDRoEPr06WPW7/att97CqVOn0LBhQ8W+np82bRruuecePPTQQ2jbti3++ecf/PLLLw6v/rVkyRJFuyBYI8mynU3Z/FR2djaio6ORlZVltd
6jMjqdDqtXrzZ+8GwqKQbeLv3FePEfIMK9dR/kGIfOJfk0nsvAwXPpvIKCApw8eRLJycmVTgbyBL1ej+zsbERFRUGlYn6sos8++ww//PAD1q5d6+2hVMrRc3nw4EH06NEDR48eRXR0tMV9bH1eHYnXOGFLaeogICRGZF7zrjB4JSIiIgDA448/jmvXriEnJ8elJWJ9UUZGBhYtWmQ1cFUSg1d3CKsugtd8TtoiIiIiISgoCK+//rq3h+EWvXv39thzMafvDpy0RUREROQWDF7dIax00haDVyIiIiJFMXh1B2ZeiYjIgwJ87jUFCKU+pwxe3cGYeWXNKxERuY+hO4O7l+MkUoLhc+pqVxFO2HIHrrJFREQeoFarERMTg4sXLwIAwsLCzJb09CS9Xo+ioiIUFBSwVZafU/JcyrKMvLw8XLx4ETExMVCrXVuxjcGrO4Sy5pWIiDwjPj4eAIwBrDfJsoz8/HyEhoZ6NYgm17njXMbExBg/r65g8OoOrHklIiIPkSQJCQkJiIuLg06n8+pYdDodNm3ahNtvv50LTvg5pc+lRqNxOeNqwODVHQzBK/u8EhGRh6jVasWCA1fGUFxcjJCQEAavfs6XzyULUtyBmVciIiIit2Dw6g6GbgMFWUCJd7/CISIiIgokDF7dISQGQGlxc/41b46EiIiIKKAweHUHdRAQGiOus10WERERkWIYvLoL616JiIiIFMfg1V3Y65WIiIhIcV4NXjdt2oRBgwYhMTERkiRh5cqVJvfLsozJkycjMTERoaGh6N69Ow4dOuSdwTqKmVciIiIixXk1eM3NzUXr1q0xa9Ysi/d/8MEHmD59OmbNmoVdu3YhPj4evXr1Qk5OjodH6gQGr0RERESK8+oiBf369UO/fv0s3ifLMmbMmIHXX38dQ4cOBQAsXLgQtWrVwuLFi/HEE09YfFxhYSEKCwuNt7OzswGIlSKcXXnE8DhHHq8KiYEaQEnuZei9vOIJlXHmXJJv4rkMHDyXgYPnMnB4+lw68jySLMuyG8diN0mSsGLFCtx9990AgBMnTqBhw4bYu3cv2rRpY9zvrrvuQkxMDBYuXGjxOJMnT8aUKVPMti9evBhhYWFuGbsljS78hOYZ3+J0bGf8mWQ50CYiIiIiIC8vD8OHD0dWVhaioqJs7uuzy8OeP38eAFCrVi2T7bVq1UJ6errVx7366qt44YUXjLezs7NRt25d9O7du9I3wxqdTofU1FT06tXL7iXSpH1XgYxvUadaKBL693fqeUl5zpxL8k08l4GD5zJw8FwGDk+fS8M35fbw2eDVQJIkk9uyLJttK0+r1UKr1Zpt12g0Lr/5Dh0jMg4AoCq4BhV/gX2OEp8H8g08l4GD5zJw8FwGDk+dS0eew2dbZcXHxwMoy8AaXLx40Swb65OME7a4SAERERGRUnw2eE1OTkZ8fDxSU1ON24qKirBx40Z06tTJiyOzk7HPK4NXIiIiIqV4tWzgxo0b+Oeff4y3T548iX379iE2Nhb16tXDhAkT8N5776Fx48Zo3Lgx3nvvPYSFhWH48OFeHLWdDJnXwiygRAeo+fUJERERkau8Grzu3r0bd9xxh/G2YaLVqFGjsGDBArz88svIz8/HuHHjcO3aNXTo0AHr1q1DZGSkt4Zsv9AYABIAWWRfI/2g1IGIiIjIx3k1eO3evTtsdeqSJAmTJ0/G5MmTPTcopajUQGg1IP+q+GHwSkREROQyn615DQhcZYuIiIhIUQxe3SnMMGmLwSsRERGREhi8uhMzr0RERESKYvDqTmFsl0VERESkJAav7sSFCoiIiIgUxeDVnUJZ80pERESkJAav7sSaVyIiIiJFMXh1JwavRERERIpi8OpOhuA1nzWvREREREpg8OpO7DZAREREpCgGr+5kyLwWZgPFRd4dCxEREVEAYPDqTiHRgFT6FrN0gIiIiMhlDF7dSaUGQquJ6ywdICIiInIZg1d3Y69XIiIiIsUweHU3tssiIiIiUg
yDV3dj8EpERESkGAav7mZol8UJW0REREQuY/Dqbuz1SkRERKQYBq/uxrIBIiIiIsUweHU3Bq9EREREimHw6m7G4JVlA0RERESuYvDqbuzzSkRERKSYIG8PIOAx8+qf9CVA+jbgxgUgohaQ1EmsmEZERERexeDV3QzdBopygOIiICjYu+OhyqWtAtZMBLIzyrZFJQJ93wdSBntvXERERMSyAbcLiQGk0reZvV59X9oq4NuHTQNXAMjOFNvTVnlnXERERASAwav7qVSse/UX+hKRcYVs4c7SbWteEfsRERGRVzB49YQwBq9+IX2becbVhAxknxP7ERERkVcwePUE9nr1DzcuKLsfERERKY7BqycwePUPEbWU3Y+IiIgUx+DVE4xlA9e8Ow6yLamT6CoAycoOEhBVW+xHREREXsHg1RM4Ycs/qNSiHRYA8wC29Hbfaez3SkRE5EUMXj2BZQP+I2UwcP8i89KAqESxnX1eiYiIvIqLFHgCg1f/kjIYqNkM+LS9uN3wTmDE98y4EhER+QBmXj3BELxykQL/UXSj7HpQCANXIiIiH8Hg1RPY59X/FGaXXS/I8t44iIiIyASDV08wlg0w8+o3Chi8EhER+SIGr55gyLwW3QCKC707FrKPSeY12/p+RERE5FEMXj1BGw1IpTWTzL76B2ZeiYiIfBKDV09QqYDQauI66179Q/nMa2E2oNd7byxERERkxODVU9guy7+YlArIQFGO14ZCREREZRi8egqDV/9SWKFUgKUDREREPoHBq6cYJm2x16t/qDhJi8ErERGRT2Dw6inGXq8MXv1CYcXglR0HiIiIfAGDV09h2YB/YeaViIjIJzF49RQGr/7FkHkNChWXDF6JiIh8AoNXT2Hw6l8MmdfoOqW3GbwSERH5AgavnhLKmle/Ysi8xtQ1vU1ERERexeDVU4yZVwavPq+4CCguENejS4NXZl6JiIh8AoNXTzF2G2DZgM8rn2U1lg1c98pQiIiIyJRPB6/FxcX497//jeTkZISGhqJBgwZ46623oPfHpToNmVddLqAr8O5YyDZDljU4omxZX7bKIiIi8glB3h6ALe+//z7mzJmDhQsXonnz5ti9ezceeeQRREdH47nnnvP28BwTEg1IakAuEQsVaBK9PSKyxpB51UYBITHiOssGiIiIfIJPB6/bt2/HXXfdhQEDBgAA6tevjyVLlmD37t1eHpkTJEmUDuReEqUDUQxefZYhyxoSJX4ABq9EREQ+wqeD1y5dumDOnDk4evQomjRpgr/++gtbtmzBjBkzrD6msLAQhYWFxtvZ2SIQ0el00Ol0To3D8DhnH28QFBoLKfcSirMvQq7u2rHIOfacSyn3GoIA6IMjoQ8KRxAAuSALxS6ef1KWUr+X5H08l4GD5zJwePpcOvI8DgWvWVlZWLFiBTZv3oxTp04hLy8PNWvWRJs2bdCnTx906tTJ4cHaMnHiRGRlZaFZs2ZQq9UoKSnBu+++i2HDhll9zNSpUzFlyhSz7evWrUNYWJhL40lNTXXp8Z0LgBoA/tz2KzIO33DpWOQaW+ey7pUtaAvgUnYhDu38C3cCKMq5jDWrV3tsfGQ/V38vyXfwXAYOnsvA4alzmZeXZ/e+kizLcmU7ZWZm4s0338TXX3+N+Ph43HrrrahduzZCQ0Nx9epVHDx4EHv27EFSUhImTZqEBx54wKUXYLB06VK89NJL+PDDD9G8eXPs27cPEyZMwPTp0zFq1CiLj7GUea1bty4uX76MqKgop8ah0+mQmpqKXr16QaPROHUMAFB/PxqqIz+hpM8H0Lcb4/RxyHn2nEvVzv9Cnfo69Cl3o6THW9DMbAVZFYTiVzJF+Qf5BKV+L8n7eC4DB89l4PD0uczOzkaNGjWQlZVVabxmV+a1devWePjhh7Fz5060aNHC4j75+flYuXIlpk+fjjNnzuDFF190fOQVvPTSS3jllVfw4IMPAgBatmyJ9PR0TJ061WrwqtVqodVqzbZrNB
qX33yXjxEuOg6oC69DzV9qr7J5LnW5AABVaAxUEeKcSfpiaKADNOGeGiLZSYnfbfINPJeBg+cycHjqXDryHHYFr4cOHULNmjVt7hMaGophw4Zh2LBhuHTpkt0DsCUvLw8qlWk3L7Va7Z+tsgAuEesvyncbCA4v6xJRkC1uExERkdfYFbxWFri6ur81gwYNwrvvvot69eqhefPm+PPPPzF9+nSMGeOnX7kzePUPhs4CIVGiTCAkWrQ3K8gCohK8OzYiIqIqzqluA+fOncPWrVtx8eJFsyzos88+q8jAAGDmzJl44403MG7cOFy8eBGJiYl44okn8Oabbyr2HB5lCF7zuUSsTzNmXqPFZUhUWfBKREREXuVw8Dp//nw8+eSTCA4ORvXq1SGVm8AiSZKiwWtkZCRmzJhhszWWX+ESsf6hfJ9XQGReAdNlY4mIiMgrHA5e33zzTbz55pt49dVXzepRqRLGsgFmXn1a+ZpXoCx4ZeaViIjI6xyOPvPy8vDggw8ycHUGM6/+oWLm1RDEFlz3ynCIiIiojMMR6NixY/Hdd9+5YyyBz5B51eUBunzvjoWsM8u8xojLApYNEBEReZvDZQNTp07FwIEDsWbNGrRs2dKsL9f06dMVG1zA0UYBqiBAXyxKB6Jre3tEZIm1mleWDRAREXmdw8Hre++9h7Vr16Jp06YAYDZhi2yQJCA0Fsi9KEoHGLz6nhIdUFyaFTdmXg1lAwxeiYiIvM3h4HX69OmYN28eRo8e7YbhVAFh1cuCV/I95UsDOGGLiIjI5zhc86rVatG5c2d3jKVq4EIFvq2wNEDVhAPq0r/t2CqLiIjIZzgcvD733HOYOXOmO8ZSNYRVE5f517w7DrKsYr0rwMwrERGRD3G4bGDnzp1Yv349fvrpJzRv3txswtby5csVG1xAYubVt1XsNFD+OoNXIiIir3M4eI2JicHQoUPdMZaqgcGrb7OZeWXZABERkbc5tTwsuYDBq2+zlHll2QAREZHPcLjm9eTJkzh27JjZ9mPHjuHUqVNKjCmwhRpW2eISsT7JYua19HpxPlBc5PkxERERkZHDwevo0aOxbds2s+1//PEH22fZg5lX32ar5rX8/UREROQVDgevf/75p8VWWbfddhv27dunxJgCmzF4ZebVJxlKA8pnXlVqTtoiIiLyEQ4Hr5IkIScnx2x7VlYWSkpKFBlUQAszlA0w8+qTjJnXaNPtxuD1ukeHQ0RERKYcDl67du2KqVOnmgSqJSUlmDp1Krp06aLo4AKSIfNanA8U5Xl3LGTOUs0rwElbREREPsLhbgMffPABbr/9djRt2hRdu3YFAGzevBnZ2dlYv3694gMMONpIQBUE6IuB/KtAcJi3R0TlWap5Bdgui4iIyEc4nHlNSUnB/v37cf/99+PixYvIycnBww8/jL///hstWrRwxxgDiyRx0pYvY+aViIjIpzmceQWAxMREvPfee0qPpeoIqw7cuMDg1RdZzbxywhYREZEvsCvzevr0aYcOeu7cOacGU2Ww44DvqizzylZZREREXmVX8Nq+fXs89thj2Llzp9V9srKy8MUXX6BFixZYvny5YgMMSKHVxCWDV99Tac0rM69ERETeZFfZwOHDh/Hee++hb9++0Gg0aNeuHRITExESEoJr164hLS0Nhw4dQrt27fDhhx+iX79+7h63f2PNq28q0QG60g4QIdZaZTF4JSIi8ia7Mq+xsbH4z3/+g4yMDMyePRtNmjTB5cuXjcvEjhgxAnv27MHWrVsZuNqDwatvKizXv1gbaXofuw0QERH5BIcmbIWEhGDo0KEYOnSou8ZTNTB49U2GrKomDFBrTO9j2QAREZFPcLhVFinAsMpWPmtefYq1eleA3QaIiIh8BINXb2Dm1TcZOw1Em9/HzCsREZFPYPDqDYbMK7sN+JZCK22yACAkxnQfIiIi8goGr97AzKtvKrBRNmDYVpgN6Es8NyYiIiIyweDVG0JLM6/FBUBRnnfHQm
VsZl6jzPcjIiIij3Nqedhz585h69atuHjxIvR6vcl9zz77rCIDC2jaSEClAfQ6kX0NDvP2iAiwnXkN0gJBoUBxvtjPsNAEEREReZTDwev8+fPx5JNPIjg4GNWrV4ckScb7JEli8GoPSRKlAzfOi+A1pq63R0QAUHBdXFrKvAJi0taNfE7aIiIi8iKHg9c333wTb775Jl599VWoVKw6cFr54JV8g7FVloVuA4AIam+cZ/BKRETkRQ5Hn3l5eXjwwQcZuLrK2Ov1mnfHQWUKbNS8AmXtsljzSkRE5DUOR6Bjx47Fd999546xVC3GdlnMvPoMW4sUAOz1SkRE5AMcLhuYOnUqBg4ciDVr1qBly5bQaEyX0Zw+fbpigwtobJfleyrLvGq5yhYREZG3ORy8vvfee1i7di2aNm0KAGYTtshODF59DzOvREREPs/h4HX69OmYN28eRo8e7YbhVCGhXGXL59hb81rAmlciIiJvcbjmVavVonPnzu4YS9XCzKvvqTTzyrIBIiIib3M4eH3uuecwc+ZMd4ylajEGr8y8+oQSHaArXe0sxFqrLEPm9bpHhkRERETmHC4b2LlzJ9avX4+ffvoJzZs3N5uwtXz5csUGF9DYbcC3FOaUXddGWt4nJKZ0X5YNEBEReYvDwWtMTAyGDh3qjrFULeXLBmRZrLpF3mMoBdCEAWqN5X04YYuIiMjrnFoelhRgyLyWFIqvq4PDvTueqq6yetfy9zF4JSIi8houk+UtwRGAOlhcZ+mA91XWaQBgtwEiIiIf4HDmNTk52WY/1xMnTrg0oCpDkkTpQE6mCF5j6nl7RFWbPZnX8mUDLPUgIiLyCoeD1wkTJpjc1ul0+PPPP7FmzRq89NJLSo2raigfvJJ32ZV5Lb1PLgGKcgFthPvHRURERCYcDl6fe+45i9s//fRT7N692+UBVSmh1cRl3jXvjoPsy7xqwgBVEKAvFtlXBq9EREQep1jNa79+/bBs2TKlDlc1GFbZ+uc34ORmQF/i3fFUZfZkXiWprHSA7bKIiIi8QrHg9fvvv0dsbKxShwt8aauAf34V1/cvARYOBGa0ENvJ8wpLOwjYyryWv58dB4iIiLzC4bKBNm3amEzYkmUZ58+fx6VLl/DZZ58pOriAlbYK+PZhALLp9uxMsf3+RUDKYK8MrcoyZl6trK5lwF6vREREXuVw8Hr33Xeb3FapVKhZsya6d++OZs2aKTWuwKUvAdZMhFngCpRuk4A1rwDNBgAqtYcHV4XZU/MKsF0WERGRlzkcvE6aNMkd47Dq3LlzmDhxIn755Rfk5+ejSZMmmDt3Lm655RaPjkMx6duA7AwbO8hA9jmxX3JXjw2ryrOn5hUoF7xed+twiIiIyDK7g1e9Xg+9Xo+goLKHXLhwAXPmzEFubi4GDx6MLl26KDq4a9euoXPnzrjjjjvwyy+/IC4uDsePH0dMTIyiz+NRNy4oux8pw+7MK2teiYiIvMnu4HXs2LHQaDT4/PPPAQA5OTlo3749CgoKkJCQgI8//hg//PAD+vfvr9jg3n//fdStW9dkSdr69esrdnyviKil7H6kDLszrzHikt0GiIiIvMLu4HXr1q2YNWuW8faiRYtQXFyMY8eOITo6GhMnTsSHH36oaPC6atUq9OnTB/fddx82btyI2rVrY9y4cXjsscesPqawsBCFhYXG29nZIsjQ6XTQ6XROjcPwOGcfbyKxPYIiE4GcTEgW6l5lSEBUIooT2wNKPB+ZsHYugwqyIAHQqcNsvu8qTQTUAEryrkHP8+NViv5eklfxXAYOnsvA4elz6cjzSLIsW5o5ZCY8PBwHDx5EcnIyAGDo0KGoXbs2Zs6cCQBIS0tD9+7dcfHiRSeGbFlISAgA4IUXXsB9992HnTt3YsKECfjvf/+Lhx9+2OJjJk+ejClTpphtX7x4McLCwhQbmysSru9C+5PifSu/wKjhROxKfgaZMe1dexJZj+o3jiBEdx0FmhhciWgKSIp1Rg
s4A/56DEH6QqSmfIg8rfWsd4OL69Dy3Fc4F3Mrdic/7cEREhERBa68vDwMHz4cWVlZiIqy/S2o3cFr9erVsXnzZqSkpAAAEhMT8eGHH2LEiBEAgBMnTqBFixbIy8tzcfhlgoOD0a5dO2zbts247dlnn8WuXbuwfft2i4+xlHmtW7cuLl++XOmbYY1Op0Nqaip69eoFjUbj1DEqkv7+Cep1r0HKKZu8JUfVRkmvdyE3G6j8sSMTUdL7PZeP7e8snkt9MTRT48X9zx8Ry/ZaIe3/BkE/joe+wR0oGfadJ4ZMVrjj95K8g+cycPBcBg5Pn8vs7GzUqFHDruDV7rKB1q1b43//+x+mTp2KzZs348KFC7jzzjuN9x8/fhyJiYnOj9qChIQEY7BscNNNN9lcyUur1UKr1Zpt12g0Lr/5ShzDqOUQoPlg4MQmYPF9gF4HacT3CKqVUvljbUlbBSx7BBVbcUk5mQha9gh7yJYyOZd5OWXbI6oDahvnOFwsxKEqzIGK/zD7BEV/L8mreC4DB89l4PDUuXTkOez+HvmNN97AjBkz0LBhQ/Tp0wejR49GQkKC8f4VK1agc+fOjo20Ep07d8aRI0dMth09ehRJSUmKPo/XqNRAozuAuh3E7TN/uHa8SnvIQvSQ5TK0pgyTr4JCbQeuALsNEBEReZndmdc77rgDe/bsQWpqKuLj43HfffeZ3H/zzTfj1ltvVXRwzz//PDp16oT33nsP999/P3bu3InPP//c2PEgYCR1BNK3AKe3A+0ecf447CHrHHs7DQBcYYuIiMjLHFqkICUlxexrfIPHH39ckQGV1759e6xYsQKvvvoq3nrrLSQnJ2PGjBnGOtuAkdRJXKZbruO1G3vIOsfeHq9AWfDKVllERERe4fAKW542cOBADBwY4JOM6twKSGog6zSQdRaIruPccdhD1jnOZF6LCwBdAaAJcd+4iIiIyAx7J/kCbQSQ0EpcdyX7mtQJiEqEaQOu8iQgqnZZppcERzKvwZEwvr/MvhIREXkcg1dfUa80oDy9zfZ+tqjUQN/3YXnCVmnA1Xea2I/KOJJ5VanKgtwCBq9ERESexuDVVyR1FJeu1r2mDAZq32K+PSqRbbKsKSydfGVP5hXgpC0iIiIvsjt43blzJ0pKylosVVzboLCwEN9++61yI6tq6pUGr5cOA3lXnT9OcRFwqbS9WLX64vLmkcCEAwxcrTFmXqPt29/YLuu6W4ZDRERE1tkdvHbs2BFXrlwx3o6OjsaJEyeMt69fv45hw4YpO7qqJLwGUKOJuH7ahezrmR1A0Q0gPA5oPbxsO0sFrHOk5hVg5pWIiMiL7A5eK2ZaLa0qa+dKs2SNIfua7kLd67F14rJxr7LM6/V0l4YV8BypeQXYLouIiMiLFK15lSRrs9zJLoYuAK5kXo+listGPYFqpSuRXWPwapOjmVctV9kiIiLyFp/v81qlGILXzL+AolwgONyxx18/DVz6W/SMbXiH6EMKANlngRJd5UufVlXOZl4ZvBIREXmcQ8FrWloazp8/D0CUCPz999+4ceMGAODy5cvKj66qiakHRNURwebZXUCD7o493pB1rXsrEFoN0OqBoBDRUD/rLBCbrPiQA4LTNa8sGyAiIvI0h4LXHj16mNS1Gla+kiQJsiyzbEAJSR2BA9+JllmOBq///CouG/cSlyqVCIgvHwWunWLwag0zr0RERH7D7uD15MmT7hwHGdQrDV4dXayguBA4sUFcb9y7bHtMkgheOWnLOoczr6x5JSIi8ha7g9ekpKRK99m3b59d+5ENhrrXs7sdq1NN3wro8oDIBKBWi7LtnLRlm75EtBYDHOjzym4DRERE3uJyt4GsrCx89tlnaNu2LW65xcLKTuSYGk1FvaouT0zcstex0pKBRj2B8uUbMaXBKzOvlpUPQNnnlYiIyOc5HbyuX78eI0eOREJCAmbOnIn+/ftj9+7dSo6talKpnOv3Wr6/a3nMvNpmqHcNCgGCgu
17DFtlEREReY1DE7bOnj2LBQsWYN68ecjNzcX9998PnU6HZcuWISUlxV1jrHrqdQSOrBb9Xjs/W/n+V08CV44BqiDzSV7MvNrmaL0rwMwrERGRF9mdee3fvz9SUlKQlpaGmTNnIiMjAzNnznTn2KouQ91r+jZAr698f0OXgXodzes2Dats5V4SvWPJlKOdBgAgJEZcFt0ASooVHxIRERFZZ3fwum7dOjz66KOYMmUKBgwYALVa7c5xVW0JrQFNGFBwXSw6UBlDyUCjnub3hcaUBbQsHTDnVOa13L6ctEVERORRdgevmzdvRk5ODtq1a4cOHTpg1qxZuHTpkjvHVnWpNUCd9uJ6ZS2zdPnAyc3ievkWWeWxdMA6ZzKvao344wJg6QAREZGH2R28duzYEV988QUyMzPxxBNPYOnSpahduzb0ej1SU1ORk5PjznFWPcbSge229zu1FSjOB6JqA3E3Wd6Hk7ascybzCrBdFhERkZc43G0gLCwMY8aMwZYtW3DgwAH861//wrRp0xAXF4fBgwe7Y4xVk6HjwOntQLlVzcyU7zJgbYUzZl6tM2ROHcm8Apy0RURE5CUu9Xlt2rQpPvjgA5w9exZLlixRakwEiLIBVRCQfQ64ftr6fv+kiktrJQNA2aQtZl7NGTOvdi5QYMB2WURERF7h8iIFAKBWq3H33Xdj1apVShyOACA4DEi4WVw/baV04Mpx4OoJQKUBkm+3fixmXq1zpuYVKJd5ZdkAERGRJ9nd53XMmDGV7iNJEubOnevSgKicpI7Aud2iZVbrB83vN5QMJHUCtJHWj1O+5lWWrZcXVEWu1rwy80pERORRdgevCxYsQFJSEtq0aQPZVg0mKadeJ2DbTOuZ12OGkoFelu83iKknLotygPxrQFiscmP0d05nXlk2QERE5A12B69PPvkkli5dihMnTmDMmDEYOXIkYmMZBLlVvdvE5eWjQO5lILxG2X1FecCpLeK6rXpXANCEAhHxwI3zwLWTDF7LY+aViIjIr9hd8/rZZ58hMzMTEydOxI8//oi6devi/vvvx9q1a5mJdZewWKBmafuritnXU5uBkkKRVa3RpPJjsV2WZa7WvLJVFhERkUc5NGFLq9Vi2LBhSE1NRVpaGpo3b45x48YhKSkJN27ccNcYq7ak0pZZFfu9GlfVstEiqzxO2rLM2cwruw0QERF5hdPdBiRJgiRJkGUZer1eyTFReUmdxWX5lbZkuVx/10pKBgyYebXMmHl1sFUWywaIiIi8wqHgtbCwEEuWLEGvXr3QtGlTHDhwALNmzcLp06cRERHhrjFWbYbFCjL/AgpLVzG7fEz0flUHA8ld7TuOv2Re9SViudsD34tLfYl7n6uo9D11uOY1RlwyeCUiIvIouydsjRs3DkuXLkW9evXwyCOPYOnSpahevbo7x0YAEF1b1LVePw2c2Qk06lGWda3fBQgOt+84/pB5TVsFrJkIZGeUbYtKBPq+D6S4YfW2wnJLGnOFLSIiIr9gd/A6Z84c1KtXD8nJydi4cSM2btxocb/ly5crNjgqVa+TCF5PbzcNXu0tGQDKVtnKOiMyjiq14sN0Sdoq4NuHAVSY/JedKbbfv0j5ANZQ7xoUAgRpHXssW2URERF5hd3B68MPPwyJze29I6kjsH+pmLRVeEMsWgCIyVr2iqotlpstKQJyMoHoOu4ZqzP0JSLjWjFwBUq3ScCaV4BmA5QNugucnKwFmHYb4MIPREREHuPQIgXkJfU6ictzu4F/UgG9DqiWDFRvaP8xVGoRsF47JUoHfCl4Td9mWipgRgayz4n97K3xtUehk22ygLLgVdYDRTdsr3BGREREinG62wB5UI3GQGh1oLgAWPuG2Naoh+PZPl+dtHXjgrL72cuVzGtQCKDSlB7Hh0oHPDnhjYiIyAvszrySFx3+EdDliuvZZ8TloRVAcjfH6kCrJQEn4XuTtiJqKbufvVzJvEqSyL7mXRbBqy9ksj094Y2IiMgLmH
n1dYaJTMUFptvzrortaavsP5avZl6TOokgC9YyyZKo2U3qpOzzGjKmzmRegXIdB3xglS3D56Ri+YVhwpsjnxMiIiIfxuDVl1U6kQliIpO9Xw0bOg74WuZVpRbZQVv6TlO+Q4Irmdfyj/N22YDSnxMiIiIfxuDVlzkykckevpp5BcTX2gM+Mt8eFOqeNllAuZpXB1fXMvCVXq9Kf06IiIh8GINXX6b0RCZD5jU7AygudGpIbhVVW1xWqw/0eFNc1xeXLZGrNJczr+XaZXmTtya8EREReQGDV1+m9ESm8BqAJgyADFw/4/Sw3OZimris0x7o+i8gobVoC7Z/qXuez5VuA0C5zOt1RYbjNG9NeCMiIvICBq++TOmJTJJUrnTglAIDVNjFw+Iy7iZx2fZhcbl3kVgIQGmuZl61PlLz6qkJb77UhktfAil9C2pf3Q4pfQvreYmIqhAGr77MZCJTxcCk9LajE5mqlQavvjZpCygXvKaIy5b3iZrXS38DZ3cp/3wuZ15jTI/jLZ6Y8Ja2CpjRAlg4EFg2VlzOaOGdLgalYwn66m60S5+NoK/u9t5YiIjI4xi8+rqUwWLCUlSC6faoROcmMvnqpK2SYuDyEXHdkHkNiQaaDxHX9y5U/jmVqnn1duYVKP2cLITZHzkaBSa8+VIbLl8aCxEReQWDV3+QMhiYcBAY9RNwz1xxOeGAcwGJr2Zer54ASooATTgQXa9su6F04OBy5TOcLmdefaRswCCuOQAZUAWJmmEACAoDmg10/pi+1IbLl8ZCRERew+DVX6jUQHJXoOW94tLZr4B9NfNqmKwV1wxQlftY1rsNqNEE0OUBB5cp+5zGzKuft8oyOLtTXNa+Bej+KhAcAeRfAS4cdP6YvtSGy5fGQkREXsPgtarx1YUKKk7WMpAk04lbSpH1QGGOuO5qtwFvt8oyOFMavNZpD6g1ZS3GTvzu/DF9qQ2XL42FiIi8hsFrVWMoG8i/6v2JRuUZM68p5ve1ehBQaYCMvcD5A8o8X+ENGL9q9vduAwaGSW11bxWXDe8Qlyc2OH9MX2rD5UtjISIir2HwWtVoI4HQWHHdl0oHrGVeASCiJtCsv7iuVPbVkC1Va4EgrXPH8KWygcKccn1yS4PXBqXBa/o2QFfg3HE91YbLobFY48GxEBGR1zB4rYp8bdKWrgC4elxct5R5BYC2o8Tl/m8AXb7rz+lqpwGgLHgtKXI+OFTKuT2iFCK6bllnippNgcgEoLgAOLPDueN6og2XI2Np/5iVO51sHUdERH6HwWtV5GuTti4fFYFXaDXrX/k2uEN0ISjIAg7/6PJTSoUudhoAxIQoQ9Dk7ezrmdKSgTrty7ZJEtCgu7h+3IW615TBQNcXzbdrI11vw+WojL3iUhNmuj0izvNjISIir/Cr4HXq1KmQJAkTJkzw9lD8m69lXssvTiBZ+XpapQLajBTXlSgdMASbrmReVSrfaZdl6DRgqHc1aKBA3SsA3DgvLpv0K8uC10zxbLB45Thw+CdxfWwqikeuRFZIHXG7ywsMXImIqgi/CV537dqFzz//HK1atfL2UPyfr2VejZO1LNS7ltdmBCCpgFObRSDjCiUyr4BvdByQ5bLJWnUqBq/dxWXmX0DeVeeOX1IM/P2zuH7bk2U9ZDP2lHVs8ITtswDIQOM+QHwLyEldcDa2tL715CbPjYOIiLzKL4LXGzduYMSIEfjiiy9QrVo1bw/H//ls5rWS4DW6DtCop7juYvZVKigNulzJvALlJm1dd+04rrjyD5B/DQgKAeJbmt4XWau0jlh2PvuavlV0pwiNBZK6iM9PtWRAX+y5nqo3LgF/fi2ud37WuPlyZGmN9KnNIsgmIqKAF+TtAdhj/PjxGDBgAHr27Il33nnH5r6FhYUoLCw03s7OFhkxnU4HnU7n1PMbHufs431OZB1oAMjX01FcVGT9q3oPCbqYBglAcWwTyJW8x1KrEQg6tg7yvsUo7jpR9DN1gOEcluRfgx
qAXhOJEhfOqzo4EioAxblXKx27u0intiMIgD6+NUpkCagwDlX926G+mAb9P7+hpOkgh4+vOrhCvFdN+qFELwN6nTjmtZMo+ec36JPvVOaF2BrDjtlQlxRCn9AGJYm3AqW/z9dD60MOiYFUcB3FZ3ZDrn2L28dCygu4f2OrMJ7LwOHpc+nI8/h88Lp06VLs3bsXu3btsmv/qVOnYsqUKWbb161bh7CwMAuPsF9qaqpLj/cVKr0OAyFB0uXht1VLUahxcoUpBQSV5GNA1hkAwLp9Z6A7eM3m/pJcgt5B0QjJvYg/v5mKzJh2Tj1v+t/70RjAycwrOLh6tVPHAIBbswqQAODg7q1IP+Vkyy0XtT69HPUBHC+KRZqF1xKXFY6OAArS1iAVPzv2x4qsR5+Dy6EG8EdOPC6WHj/xWiTaA8jd/zN+13VW4FVYpy4pRO9Dc6AGsFvbGZm//FJ2p6RCprYREgt249jaL3A0ngsU+LNA+TeWeC4DiafOZV5ent37+nTweubMGTz33HNYt24dQkJC7HrMq6++ihdeeMF4Ozs7G3Xr1kXv3r0RFeXcV8Q6nQ6pqano1asXNBrHMn0+68QbQE4GerZrDLm2cwGgEqRzu4H9gBwRj16D77frMarQfcD2/0M7VRpK+r/p0PMZzmX9xOrARaB+s1aod3t/J0YuqH9cDezfi5aN66F5R+eP44qgz6cCAJK7Poj6zSyMoagb5I/+D2FFl9G/401AbAO7jy2d2YGgfVmQtVFod/+/AHWwuCOvA+SPP0VUwVn0v/0Wty4MoNr1JdT7cyFXS0abYW+gTWkrLMO5rHHrvUDqbjQNzkSj/t45B+SagPw3toriuQwcnj6Xhm/K7eHTweuePXtw8eJF3HJL2VeBJSUl2LRpE2bNmoXCwkKo1aY9HbVaLbRa8wyYRqNx+c1X4hg+o1p9ICcDQTnnAE1H743jylEAgFQrxf73tt1oYPv/QXViPVR5F0QtrIPUuhviMqwa1K6c09AYcZyiG64dx1kFWcClvwEAQfU7ApbGoIkB6nYA0rdAc3ozUKup/cc/IiZqSU37QxMSXrY9Oh5IaAVk/gXNmW1AK/v+8HBYSTGwc7YYQ8fx0GjN/4iVGt0BpAKqs7ugknVAsGvfsJD3BNS/sVUcz2Xg8NS5dOQ5fHrCVo8ePXDgwAHs27fP+NOuXTuMGDEC+/btMwtcyQHGSVunvDoMkzZZ9qreUEwckvXA71OBA98DJzcD+hL7j1GgcLcBb7XKOrcHgAzE1BOTs6xp2F1cOjJpS5bLeupaakNl6GTgahsuWw7/ILpihFUHbh5heZ9qDYCoOmKxiNPb3TcWIiLyCT4dvEZGRqJFixYmP+Hh4ahevTpatGjh7eH5N19pl2Vok1WzmWOPM8yq3/cVsGwssHAgMKMFkLbKvscXKtxtwFutss5YaZFVUYPSSVUnN9kf5J/bC2SfBTThQEMLk7LKB6+ybN8xHSHLwNb/E9dvfdx6RrX8YgzuDKSJiMgn+HTwSm7kK+2ynMm8pq0C/phjvj07E/j2YbsCWEVW2Cr/eG9lXq0tTlBR4s0i0C7IAjL+tO/YaSvFZZM+gCbU/P56HQG1Fsg+J9p1Ke3UZiBzHxAUamNZ2FKG4PXkRuXHQUREPsXvgtcNGzZgxowZ3h6G//OFzGvuZSD3orhe0846TH0JsGYiAEuZvtJta16pPLtoCF4V6/PqheBVry+3OEF72/uq1EDy7eK6PUvFyjJwuPSPAGsrV2lCgXodxHV3ZDwNWdc2I4Dw6rb3Nby2zP3OL8ZARER+we+CV1JItfri8voZ7zV3N2RdY5IAbYR9j0nfBmRn2NhBFpnAyprnGzOvLrYJMwavXigbuHJMBM1BoeaLE1jiyFKx5/eLeuigUKBRLxvH7G7/MR1x4RDwT6pYUa3j+Mr3L78YA1fbIiIKaAxeq6rIBNH2SC4RwZ43lM6Sd6hk4IadfTxt7SfrgULRbcCvM69nSksGEt
vYt1hDw9Lg9cwfZa/fGkPpRaMetv+wSO4uLh2dMFeZbTPF5U2D7W/tldxNXLLulYgooDF4rapUKiC6rrjurdIBw2StypaFLc/efqI29gvSF0AylBi43G3AizWvxnrXSkoGDKoli64Eep3tzLQsA2k/iOspd9k+ZuLNIntdmAVk7LNvHJXJOgcc+E5cL7cUbKU4aYuIqEpg8FqVeXvSljOTtZI6AVGJAKytEiUBUbXFflZoSkpX8VAHAxr7Fr+wKiRGXOpygRIPL4dob6cBA0kqVzpgo+710t+iJEEdLCZr2aJSA8ldKz+mI/6YDeiLRTs0R5Z7rd8ZkNTAtZPen4ioBH2JyGg70wqOiCiAMXityrw5aUuWncu8qtRA3/dLb1gJYPtOE/tZEVSSL664mnWteAxD+y1PKLc4QaWdBspraEfdq6FkoMEdZWURtiiR8TQEansXATu/FNs6P+fYMbSRQJ3S1eL8vetA2irR+m3hQOdawRERBTAGr1WZNzOvOZkiAJPUQI3Gjj02ZTBw/yIgKsF0e3CE2G5tdnwpY+bV1XpXAFAHiT6oAFBw3fXj2evsbojFCZKAiDj7H5fcDYAk/nDIOW95n8q6DFTUoFwtbZH9a1MblQ/UVj0DFOcDqiBx6ahAKB1IWyVavlWcmOhAKzgiokDG4LUq82bm1ZB1rd4ICDJfzrdSKYOBCQeBUT8BHZ4U2yIT7Qq4NEpmXgHvTNoytMiq28Gxx4XFAgmtxXVLAd6V48CFgyJ4bNrfvmNWb1i2wtWZHY6Nx1qgpi8Gvh3leKBmDF43ilZi/kapVnBERAGMwWtVZmiX5Y0lYo31rg6UDFRkqLfs/qrI4F45atdrCVIy8wp4p13WmT/EpSMlAwaG0gFL/V4NE7XqdxWBrj2cXeHKZqBWytFArXY7QBMG5F0u+wPJnyjVCo6IKIAxeK3KDMHrjQuAzomvaF3hzGQta0JjgHq3ievHUivdXfnMq4c7Duj1wNk94nplixNYUr7fa8VlXY0lA5V0GTA7ZveyY9rLHYFaUDCQ1NnxsfgKJVrBEREFOAavVVloNSA4Uly/ftqzz+3MZC1bGvcWl3YFr4bMq4sLFBh4umzg8hHRmkoTBtRq4fjj63YAgkKAG+fLJn0BovY540+xMECzgY4ds/wKV7lX7HuMuwK1BqX9Xv1x0pYCreCIiAIdg9eqTJK8M2lLrwcuOrFAgS2G4PXkpkqzyEF6N9W8FnqobMC4OEFbMWHMUZqQslZi5UsHDv8oLut1AiJqOnbM8itcnbJzhSt3BWqGLPCprUBxkWOP9TZjKzhrKm8FR0QU6Bi8VnXemLR1/ZSYSa7WArHJyhwz7iYxaag4Hzi1xeauinYbADyfeXV0cQJLLC0V62zJgPGY3c2PaYu7ArW45kBYDdF799wexx7rbSo10Ps92/tU0gqOiCjQMXit6oyZ11Oee05DvWvNpsr9JyxJQONe4vqxdTZ3VbTPa/njeCp4dXRxAkuM2cktIjuZnVE2CewmB0sGKh7T3uBVpQaaD7VyZ2kPX2cCNZWqrIzBH+tes86UXqnQx1hSA/cvtL+FGRFRgGLwWtV5I/NqrHdVqGTAwFj3us58IlI5fp15zb8mal4B5yZrGdRqUZadPLsLOPxT6TFvrSQbakNSJ9Fi69op+/4YKswBDi4T1w211wZRiXb17LXKX/u9Xj8NbJgqrg/6RLSCG/yp+JZCLhHt4IiIqjgnCuYooHij5lWJNlmWNOgmljS9dgq4fAyo2cTibu7r8+qBmldDl4FqyY7XpZanUokA7+D3wN6FQMY+sf2mQc4fUxspAurT20Wf1Vvq295/wzSxWEW1ZODJrUDGXjE5K6JWaSDsQlbeELye2y2CZG2kzd19giwDq18GdHmi7rjtw+IbheSuwKnNwP6lwF9LXCsXISIKAMy8VnXGXq8OBK+urrmuZJus8oLDgfpdxHUbpQPGsgHFMq8eLBsw1ru6UDJgEFpNXO7/pi
ybu32Ways42ZvxPH8Q2DFbXO//H0AbLoK0lveKS1fLSaolic+2vth/eqL+/RNw9BdApQEGfiwCV4PWD4rLg8uA4kLvjI+IyEcweK3qYuqJy8Is8ZV0ZVxdc724CLh8VFxXOvMKmJYOWGEsG9D6YassQ6cBV0oGAHG+dn1pvv3GRdeWIDUErydtrHCl1wM/vyC+Bk+5C2jc07nnsncs/lA6UJgjsq4A0Pk5IK6Z6f3JtwORCWIJ4qNrPT48IiJfwuC1qgsOB8JLv36uLPuqxJrrV4+LbFhwJBBdx7kx22IIXtO3iYDAAo1e6cxrjLgsdHPwqteXzZ53dFlYk+O4cQnS2rcAwRFA3hWxzKwl+74Wk8M04UCfqY4/h73KLxXr69a/C+RkiBKK2180v1+lBlrdL67/tdSzYyMi8jEMXsm+SVtKBTzlFyeQJNv7OqN6QyC2AaDXWc64yXr/7TZw6W/RS1YT7lrJhTuXIFVrbK9wlXcVSH1TXL/jVSC6tuPPYa/6pR0HLh4Ccty0IpWrJTSAWBhi53/F9QEfAZpQy/u1Ki0dOLbO/oUgiIgCEINXsm/SllIBj7sma5XXuI+4tFQ6UJQLyRBsK95tINv6V+VKMLSyqu3k4gQG7l6C1NbX9b9OBvKviuC7w5POHd9e4dWB+Fbi+kk7F05whKslNIAIdn+cAMh6oMW9QKMe1vetlQIktBZ/mB1a7vLwiYj8FYNXsi/zmpNp37EqC3jcNVmrPGO/11Tzllmlq2DJKo1YIlUJxmVmZaDohjLHtORsaX9XVydruXsJUkPwmr7NdHLRmV2iswEADJgusrTuZlwqdoOyx1WihAYAdn4BZO4T9dd9KlmcAABaDxOXfy1xaLhERIGEwSsBMXXF5ekd5l99lhSLGrtf37LvWJUFPOXLBtwlqTOgCRMBd8W6S0M7q5Ao5coWNCGiDyfg3tIB42QtF4NX48pW1l6/i0uQxt0EhMeJ1c4MAXdJMfDT8+L6zSOBpI7OHdtR5etebfT+dYhSJTRZ54D174jrvSaLJXYr0+JesVjBuT3ApaMODJqIKHAweK3q0laV/Qd64WDZV58HlwG75wOzbgFWPAFkn4H1YAewK+ApygOunhTX3Zl51YQAyaUZtwqlA1Jp5lWxelcDd7fLyrsKXDkmrrvaaUClBvq+X3qj4jl1YWUr4yGksqBx7yJRD7rmFeDCATG5rdcU547rjHodReuprDPAjs+cr0stz5USmvI1ssvGAkU54o+RtqPte+6ImkCj0u4M+zlxi4iqJi5SUJUZvvqsmEHKzgC+H1N2O6wG0HG8CE5XPFG60ULWqbKA5/IR8biwGq412LdH416iZ+bRdUDXf5VtNwavCjetD4kGci+5J3jVlwB7Sr9uj0wEQmNcP2bKYLGC1ZqJpoFYVKI4j64uQWoopdj/jfgxaD4ECK/h2rEd8c9vZRn2ta+Jy6hEEbw7+xrtrQU+tVl8C6AqzRGkrTJ/vwHRLkzlQB6h9YPAsbXA/m+BO/7t2GOJiAIAg9eqyuZXn6UkFdD7HeCWR4DgMLFNE2r5P+BGPSsPBjwxWcvA0DLr7E6RtQyLFbcNNa/aKJt5ZIcZgjVDcKyUigFPTobIjLsSfBmkDAaaDRAZQqVWtjKM2VIPWQDYswBoeKfrY7d3HBb/OCutS3V2+Vl7a4E3vi8yzyl3iTKK9W+bjwUA1v1b9Fu2dyxN+4ka2awzQPpWsagDEVEVwj/Zq6pKv/qEmAEd36oscAXEf7ATDoo11++ZC/R6W2w//htwbq/t4xnrXd1YMmAQU1c8j6wHjq83bpYK3FQ24I52WUpNCrJFpVZ2ZSt7/ihytoesYuNwsZdtUieRAbdFEwYER4m66z/mAOvfsjIWOD4WTSjQ/G5xnT1fiagKYvBaVbnSLql8wNP5WTGJRNaLCTm2/gP2ZOYVMO06YGBYuECpNlkGSq+y5c7gy53c2UPWV8ahUgMJrazcKYmfIf8FXv4HGPYN0O
COSg7oxFgMXQfSVopaciKiKoTBa1WlZLukPu+JrzEz94nWP9Z4ok1WeYbSgX9Sy4K8cmUDijLU0KZv8/6kIG9ydw9ZXxjH36uBo2vE9dBY0/uiEsvKEYK0QNO+QJuRyo+l3m2ixV3RDeDIavsfR96hxGIWRGTEmteqytAuKTsTlrN7krjfnnZJkbWAnpPEevXr3xH/cUdV+Fo1/7oItgDzddvdpW4HEVTnXRGrGNVp554JW2mrgEMrSq+vFD+emhTk7iDQUe7uIevtcVw/A6x8Sly/bZyoCa+sZtgdY5EkMXFr4/ui52vLe+1/LCCCJ6VrnckySxP1XP33gaiKY+a1qlK6XdItj4gWTkU5wC8Tze+/9Le4jKpTrqm/m6k1QMPSr2xLW2Yp3irLUJdacXECV+tSfSUIdJS7e8gqNg4A4TUdG0eJTrS3KrgOJLYBek6xr2bYXe9JqwfE5fH1QM55+x+nxMpgZB9P1K0TVUEMXqsyQ7ukqATT7eW/+rSXSgUMnCEaqB9eBRxda3q/JxYnsMRQOmDo91qgYNmAO+tSw2qIbg9WeSgIdJS7e8gqMo5SBdllS+7a4/d3xf7aKODe+UBQsAJjceE9qd5QfLsg64ED39n3GE8EU/yKXPDXunUiP8Dgtaqr2D1g1E/AhAPOfZ0V3wLoOE5c//lFoCi37D5PT9YyMDR0z/gTyLlQVjagxIQtd9WlXj4G/O8uEZQA8GoQ6Awl/yhy1zhqtQBKCoGv7gXSt1d+rH9+BbZ8LK4PngnEJis3Flfek9YPikt7ug54IphyJqurL4GUvgW1r26HlL4lcII5f61bJ/IDrHmlsq8+ldDtFeDgCiDrNLDxg7LVlDw9WcsgshaQcLOYTPbPr5AM3QaUyLy6oy718j/AgoHiMXHNxeIQv7/jnoUE3MldPWSVGkdJEbBkGHDid+Dre4GRy8QkKEuyM4HlpYtztH+0rE2VUmNx5T1pPkSU6Vw4CJw/AMS3tL6vI8GUM/8eONNXt7QeNCg7A+0AIH124NSD2vt7b6nkgzXJRDYxeCVlaSOA/h8CS4cB22eJury4m4ALh8T9ns68AqJ0IHOfKB0w1rwqUHerdF3qleMiU3XjvAjyR60Sq1G1ftA//yNT8o8ipcehCgWGLQGWPAic2AB8dY/lAFZfAix7FMi7DNRqCfR+V/mxuCK0GtCkryjV+Wup7eDVnZMAK83qSiKr22xA2WfXmWDXn4I6RxazUAcBNw0Wr4UTvIgqxeCVlNesP9BsIPD3T8CPzwGdngbyr4r7qjfy/Hia9AE2fSAmtpR+FS+HKNBtoNKODQBUQYBKU/mxrhwXGdecTKDmTcDDq8qWUfWVIDDQaEKBB0sD2JMbSwPY5aIrhSFA+udXIH0LoAkH7lsAaEK8PWpzrYeJ4PXAd2ISmdrCP+v518RysvZwZhKgvVndTzuI35ngCODEejgc7PpTUFe3g/iM6fJt73flGPDdaKBafSC5m1iVTelV4YgCDINXco9+74v/+M/uLM2ulPq0vef/s0lsA4RVB/KulFWPKlE2YJiI8+3DEHWoFv4j1hcD8/sCHZ4E7vy3yExXzB5FxgOL7hJLv9ZsBoz6EYio6fr4qHLBYcCwpcCSB4CTm4BFg8U5yr1sul/bh4EaXvjDyx6NeorP940L4tuO6DplWUlJJdq4/TIRyL1Y+bGcnQRob7b2yjHxU6nSYHflOPHHZ855YO1r8JugTl8C/PisjcC19F+iwbNEidXOL4Brp8SPRVYCeqIqisEruce5vUBxgfl2b/xno1KL/+D3f2PcJF04BEQnuP6fgGEijllGqLYIVk9uBv5aDPwxG/j7Z1ECsO8r030lNSCXADWaMnD1huAwsRLWlz1EVwxLn9s/5oigzpcCJIOgYCCxrViM49dJZdsjagGRCaJkBgCqNwZa3Q/8/l7pDhb+2Or9tnO/E/Zma+98QyyucGKD+D2ozP6l4scqBYM6pUoS9Hpg1TOi/66kFp
NYDy6zXbfeeQLw2xTxObPKxZpkogDC4JWUZ6x/s8RLGYSw6iY3g5ber9xXjrYm4tw8XPQA/WkCcP20KF+oSC6dXd3paSAizrWxkHOCtOKrdVt8NeuVtkoErhXduCB+VEFA1xeBri+I11mzmfkfW5JKlNRcOe7cGIwlNNZKB0oXPenyvHj/IuPtC14b9RK/N5eP2NhJgaBOqZIEvR748Rlg39cicL3nS6DFUFHOYSswDg4TfbJtBq+lPLkwiT/VGFOVwlZZpDxfaxGTtgrYMdt8u5K9LW01q2/UA3hiCxAcbuMAErBhWuC0CfI36dtEvbFVPtrWyOYfiqXCqgPdXhaBK2C5Pd6Q/4r7Nv0HuHrC8XGo1ECD7lbutNDazd6FG4Z/I8ZuD2eDOqV63+r14o/UP78SfwwM/VwEroB9i1n42sIkXMyCfBiDV1KeLy1t6iuNws/vN+17a2ksvhgcVRW+9Jl1RKV/KEKMueLnqmIw1fI+MVmopBBY/TIgW5mAaM2FNODAMnG94gp6lnrZOrJwg73BWrgT5Tau/PtgshjDJuDn54G9C0XgOuRzx5fstWdVOG0kUOdWx47rDK4MRj6OwSspz5cyCL6SBfbX4Kiq8KXPrCOU+lxJEjDgI0AdLEoQDv9o/xiKC4Hlj4nAt0lf4KUT9i16Yu/CDfYEdQCw+SPziXaVsfffhwPfmwb0ZlnJQcCeBeK+u+cAre5zbByAfavCFeYAX9/j2HLAjvKVP/iJbGDwSsrzlfXtAd8JGv01OKoqfOkz6wglP1c1GgOdnxPX17wCFN6w79jr3xGLJITVEKuPqYMq/4rcoLSEoXjkSuxOegrFI1eaB7v2ZGnVwaLd2X+7AWf3lN1d2VK1GXvte40rHgdmtBITsda+ZjkraaAJte+YllgN6GsDt40TLcZObQbmdBXZXkD55Xh95Q9+Ihs4YYuUZ7OFlIeXNvWVoLHSnrClE1p8LTiqKnzpM+sIpT9XXf8l+sFeTwc2TgN6v2N7/1NbgG0zxfXBM52bcKhSQ07qgnOHstE6qYvl99hqV4/SWfvVGwHfjASuHgfm9RGt+sJriCDc0iSsWs3F4gDlOpDYJKlFS6u9iyrb0fWJfbYmgLYbA3w7Crh4SLTXaz4UOL3N9Ylm5fnKH/xENjDzSu7hK+vb+0pGzZEaP/IOX/nMOkLpz5UmFOj/H3F9+2eiltWagixgxZMAZNEHt1l/BwbuBEsTzQxZ2lopwOO/i8VR9Drg5xds1Gw+BMxsVxa4BtlaeKL034eJp4AR3wMpd1cySIWyktYmeNVoDDz6K9BmpOgOcfB75etSfeUPfiIbmHkl9/GF9e19KaNWWfbIF4OjqsYXPrOOUvpz1aR32Qp5P78AjF4NqCzkOVa/BGSdAaolA32muvYa7GVrtbmQaOCBr4AtH4ueqRYZfv/1og3Xna8D18+UW0jFyr8PIVFA414iYE9bWfk43ZmVDA4DBv0fcPgnoOC6hR1cbEeY1Em8lwVZ1vcJrcZvicirGLySe/nC0qa+FDT6Y3BU1fjCZ9ZRSn+u+r0PHP8dOL1dNNtvM8L0/oPLRObS0BJKG+H6a1CCJIl+qfbo/JxYfS+xjf3/PvhKVjJ9m5XA1cCF3rcHvrcduAKiJ/K+r0XGncgLGLxS1VD6n3vxiU3Yt3ktbu7aB0ENbvdO0OiPwRH5PiU/V9F1gO4TgdQ3gdQ3gMa9gUt/i8BYUgM/ThD7dX0RqOuB1k2OcKZm097g31dq191Vl3r4J2DlU+J6o16itrbiyoE1mwHHfxOT14oLgVsfc+w5iBTA4JWqDnsmhhCRcNs4YN8S4NJh4JNWgC7P9P5qyfYvIOBJzmZH7Qn+faUMyR0Z4OO/A98/Ilb8u3kEMHgWANk8oJdUwNrXgR2fAqtfBHT5QOdnnXoZRM7ihC0iIjKn1gAt7hHXKwauAHDtJHDkF8+OyR7unqTpCxP77O19e2iFCC4rc/oPYOlwoKQIuGmwqKlVqS
xPHJMkoM+7IusOiMz8xg9EH1x9CaT0Lah9dTuk9C3sBUtuw8wrERGZ05cAe+bZ2EGBtlDu4InsqLdr1yt9jaW3d88Vdcv3zgPibhLb9CWm4w6OAL6+T/yB0rAHcM+XolevLZIE9HhDdGr4/R3g93eBzP1Axh4EZWegHQCkz3a9bReRFT6deZ06dSrat2+PyMhIxMXF4e6778aRI0e8PSwiosDnz83qPZEdtdbOylNsvsb/ASOXiSVzL6YBn3cHds0F0n6osDLYQODLO4HCLKBeR9GtIUhr/xi6vVTWC/jvH7mcLHmMT2deN27ciPHjx6N9+/YoLi7G66+/jt69eyMtLQ3h4eHeHh4RUeDy92b13s6OekJlr/GpbWIC1j+/irZnlsh6cXnLaNGGy1G3jQM2fWilQ4GLbbuIrPDp4HXNmjUmt+fPn4+4uDjs2bMHt99+u5dGRURUBfhKWyhXVIXOHrZeY0QcMPw7YPssUZtqlQT89hbQ8j7HA8z0bZW01nKhbZezKpZGBNofLeTbwWtFWVniFyQ2NtbqPoWFhSgsLDTezs7OBgDodDrodDqnntfwOGcfT76D5zJw8Fy6WWJ7BEUmAjmZkCy0hZJL20IVJ7YHXDwHPJfuJdVqWcl/9iLALD6xCXJSF8eOnXXOrkCiOOsc5PLnV18C6cx2Y4Ap1+2oSIAp/f0T1Oteg5RTVsIgRyaipPd7kJsNtPwgR8bipnH7Ik//XjryPJIsy5aa1fkcWZZx11134dq1a9i8ebPV/SZPnowpU8xXV1m8eDHCwpz4SoSIqIpKuL4L7U/OBGA6r93wn8au5GeQGWPnogDkNbWvbke79NmV7rc76Smci+3o0LGr5xxGl38qX2EtK6Q2TsT1xbmYDojLOYCWZ79GqO6q8f58TSwO1Blh/fMk61H9xhGE6K6jQBODKxFNRduucpz5vCZc32X3WBzZ15FxO7Wvoxw9tjvHYkVeXh6GDx+OrKwsREVF2dzXb4LX8ePH4+eff8aWLVtQp04dq/tZyrzWrVsXly9frvTNsEan0yE1NRW9evWCRqNx6hjkG3guAwfPpWdYzGRF1UZJr3etZ7IcxHPpXlL6FgR9dXel+xWPXOlw5hX6EgTNamMjQ186BsNttRYoKTTZJvYTt0rumW/2ubIrm2ocR4bFBmLGbwrG7zVmSqW/f4J62SPlnt36WBzZ16FxO7GvkZ1ZYEeP7dRYFJCdnY0aNWrYFbz6RdnAM888g1WrVmHTpk02A1cA0Gq10GrNZ0tqNBqX/1FU4hjkG3guAwfPpZu1HAI0H2xSQygldUKQG74q5bl0kwa327UymHOrDmrEcsJW2nZJADDoEyDvCrD3f5CunbB4FEPgG7TuVSBlUFm7rrRVQGnQaLJ/TiaClj0CDP4/oHoj0XM4x3p3DKm0NELzaTsgoiagCQPO7TY7rslY1k4EGnQFQmKA1Nds7CshKPV18XtieP8qG3f5rheO7GuQtsrKksYVWpM5emxnxqIQR373fTp4lWUZzzzzDFasWIENGzYgOTnZ20MiIqp6qsLEp0Dm7t63hrZdFoOpaWXBTp12wMJBto+Vkwm8EycmWoXXBC7/DcsBd+m2Vc84NtbsM+LHHjcuAB82FF+XG7oyWFQ6KS11EpDQGlAHAz8/b2PcEvDLRNFXV60R75utfSt2a0hbVXouKzzG0JrMEGDqSxw7tqP7e5FPB6/jx4/H4sWL8cMPPyAyMhLnz58HAERHRyM0NNTLoyMiIvIT9gaYrhy/2QAUn9iEfZvX4uaufcwzuTcu2ncsuURkUW1kUk2E1wKiawMZeyvft897QGxD4PhvwM7P7RyPrcC1nO0z7dsPsnhtUxPt2zf7HPC/IUBsA9HObO8i2AzofxgPXDgIXDluX6/mL3sBoTFA/lX7ezt7+Y9Znw5eZ88WBebdu3c32T5//nyMHj3a8wMiIiLyV+7ufatSQ07qgnOHstE6qYv5ce1tq3bvfKBaEnBwJb
D9/yrfv+97QPMhYgGGSkoj0OFJMa7gcPuC15ErRCuw70dXvm+dWwFNKJB1Frh6vPL9HXFyo/ixR2E2sPF9+4+dscexsfhAb2efDl79ZC4ZERGRf/BmCUhSJ7tqb5FylxhnUZ59wWtELcdLI+wdS4Nu4qY9+45ZI45/crNYvawyw78VX9UvHVb5vu3Gir695/YAx9ZVvn9yd1Gre3hl5ft2ngDUbAZcPgZs+ajy/X2gt7NPLw9LREREAcIQYAKAWU8AGwGmxf4BpY+Jqi32AxxbFtiRsbhr3I16Ak362Ldv/w+B7q8AnZ61sl8Ft78I3DfPvmP3eBO4eRhw5+uOvd9exOCViIiIPMNdAWb54084CIz6CbhnrriccMByTa8jY/G3wNhQDuLIsZ15v73Ep8sGiIiIKMA4UnvrzEQzR0ojHB2LO8btyL6Olkc4+v65e2KfQhi8EhERkWe5K8B091j8LTB29Njl9rfZOcLLGLwSERGRb/PXXsO+EBg7euzS/W12jvAyBq9ERERE/sZfA3oFcMIWEREREfkNBq9ERERE5DcYvBIRERGR32DwSkRERER+g8ErEREREfkNBq9ERERE5DcCvlWWLIvVJ7Kzs50+hk6nQ15eHrKzs6HRaJQaGnkBz2Xg4LkMHDyXgYPnMnB4+lwa4jRD3GZLwAevOTk5AIC6det6eSREREREZEtOTg6io6Nt7iPJ9oS4fkyv1yMjIwORkZGQJMmpY2RnZ6Nu3bo4c+YMoqKiFB4heRLPZeDguQwcPJeBg+cycHj6XMqyjJycHCQmJkKlsl3VGvCZV5VKhTp16ihyrKioKP4yBgiey8DBcxk4eC4DB89l4PDkuaws42rACVtERERE5DcYvBIRERGR32DwagetVotJkyZBq9V6eyjkIp7LwMFzGTh4LgMHz2Xg8OVzGfATtoiIiIgocDDzSkRERER+g8ErEREREfkNBq9ERERE5DcYvBIRERGR32DwWonPPvsMycnJCAkJwS233ILNmzd7e0hUiU2bNmHQoEFITEyEJElYuXKlyf2yLGPy5MlITExEaGgounfvjkOHDnlnsGTT1KlT0b59e0RGRiIuLg533303jhw5YrIPz6d/mD17Nlq1amVseN6xY0f88ssvxvt5Hv3X1KlTIUkSJkyYYNzG8+kfJk+eDEmSTH7i4+ON9/vqeWTwasM333yDCRMm4PXXX8eff/6Jrl27ol+/fjh9+rS3h0Y25ObmonXr1pg1a5bF+z/44ANMnz4ds2bNwq5duxAfH49evXohJyfHwyOlymzcuBHjx4/Hjh07kJqaiuLiYvTu3Ru5ubnGfXg+/UOdOnUwbdo07N69G7t378add96Ju+66y/gfIc+jf9q1axc+//xztGrVymQ7z6f/aN68OTIzM40/Bw4cMN7ns+dRJqtuvfVW+cknnzTZ1qxZM/mVV17x0ojIUQDkFStWGG/r9Xo5Pj5enjZtmnFbQUGBHB0dLc+ZM8cLIyRHXLx4UQYgb9y4UZZlnk9/V61aNfnLL7/kefRTOTk5cuPGjeXU1FS5W7du8nPPPSfLMn8v/cmkSZPk1q1bW7zPl88jM69WFBUVYc+ePejdu7fJ9t69e2Pbtm1eGhW56uTJkzh//rzJedVqtejWrRvPqx/IysoCAMTGxgLg+fRXJSUlWLp0KXJzc9GxY0eeRz81fvx4DBgwAD179jTZzvPpX44dO4bExEQkJyfjwQcfxIkTJwD49nkM8uqz+7DLly+jpKQEtWrVMtleq1YtnD9/3kujIlcZzp2l85qenu6NIZGdZFnGCy+8gC5duqBFixYAeD79zYEDB9CxY0cUFBQgIiICK1asQEpKivE/Qp5H/7F06VLs3bsXu3btMruPv5f+o0OHDli0aBGaNGmCCxcu4J133kGnTp1w6NAhnz6PDF4rIUmSyW1Zls22kf/hefU/Tz/9NPbv348tW7aY3cfz6R+aNm2Kffv24fr161i2bBlGjRqFjR
s3Gu/nefQPZ86cwXPPPYd169YhJCTE6n48n76vX79+xustW7ZEx44d0bBhQyxcuBC33XYbAN88jywbsKJGjRpQq9VmWdaLFy+a/RVC/sMwi5Ln1b8888wzWLVqFX7//XfUqVPHuJ3n078EBwejUaNGaNeuHaZOnYrWrVvjk08+4Xn0M3v27MHFixdxyy23ICgoCEFBQdi4cSP+7//+D0FBQcZzxvPpf8LDw9GyZUscO3bMp38vGbxaERwcjFtuuQWpqakm21NTU9GpUycvjYpclZycjPj4eJPzWlRUhI0bN/K8+iBZlvH0009j+fLlWL9+PZKTk03u5/n0b7Iso7CwkOfRz/To0QMHDhzAvn37jD/t2rXDiBEjsG/fPjRo0IDn008VFhbi8OHDSEhI8O3fS69NFfMDS5culTUajTx37lw5LS1NnjBhghweHi6fOnXK20MjG3JycuQ///xT/vPPP2UA8vTp0+U///xTTk9Pl2VZlqdNmyZHR0fLy5cvlw8cOCAPGzZMTkhIkLOzs708cqroqaeekqOjo+UNGzbImZmZxp+8vDzjPjyf/uHVV1+VN23aJJ88eVLev3+//Nprr8kqlUpet26dLMs8j/6ufLcBWeb59Bf/+te/5A0bNsgnTpyQd+zYIQ8cOFCOjIw0xjm+eh4ZvFbi008/lZOSkuTg4GC5bdu2xhY95Lt+//13GYDZz6hRo2RZFu0/Jk2aJMfHx8tarVa+/fbb5QMHDnh30GSRpfMIQJ4/f75xH55P/zBmzBjjv6U1a9aUe/ToYQxcZZnn0d9VDF55Pv3DAw88ICckJMgajUZOTEyUhw4dKh86dMh4v6+eR0mWZdk7OV8iIiIiIsew5pWIiIiI/AaDVyIiIiLyGwxeiYiIiMhvMHglIiIiIr/B4JWIiIiI/AaDVyIiIiLyGwxeiYiIiMhvMHglIiIiIr/B4JWIqIqQJAkrV6709jCIiFzC4JWIyANGjx4NSZLMfvr27evtoRER+ZUgbw+AiKiq6Nu3L+bPn2+yTavVemk0RET+iZlXIiIP0Wq1iI+PN/mpVq0aAPGV/uzZs9GvXz+EhoYiOTkZ3333ncnjDxw4gDvvvBOhoaGoXr06Hn/8cdy4ccNkn3nz5qF58+bQarVISEjA008/bXL/5cuXMWTIEISFhaFx48ZYtWqVe180EZHCGLwSEfmIN954A/fccw/++usvjBw5EsOGDcPhw4cBAHl5eejbty+qVauGXbt24bvvvsOvv/5qEpzOnj0b48ePx+OPP44DBw5g1apVaNSokclzTJkyBffffz/279+P/v37Y8SIEbh69apHXycRkSskWZZlbw+CiCjQjR49Gl999RVCQkJMtk+cOBFvvPEGJEnCk08+idmzZxvvu+2229C2bVt89tln+OKLLzBx4kScOXMG4eHhAIDVq1dj0KBByMjIQK1atVC7dm088sgjeOeddyyOQZIk/Pvf/8bbb78NAMjNzUVkZCRWr17N2lsi8huseSUi8pA77rjDJDgFgNjYWOP1jh07mtzXsWNH7Nu3DwBw+PBhtG7d2hi4AkDnzp2h1+tx5MgRSJKEjIwM9OjRw+YYWrVqZbweHh6OyMhIXLx40dmXRETkcQxeiYg8JDw83Oxr/MpIkgQAkGXZeN3SPqGhoXYdT6PRmD1Wr9c7NCYiIm9izSsRkY/YsWOH2e1mzZoBAFJSUrBv3z7k5uYa79+6dStUKhWaNGmCyMhI1K9fH7/99ptHx0xE5GnMvBIReUhhYSHOnz9vsi0oKAg1atQAAHz33Xdo164dunTpgq+//ho7d+7E3LlzAQAjRozApEmTMGrUKEyePBmXLl3CM888g4ceegi1atUCAEyePBlPPvkk4uLi0K9fP+Tk5GDr1q145plnPPtCiYjciMErEZGHrFmzBgkJCSbbmjZtir///huA6ASwdOlSjBs3DvHx8fj666+RkpICAAgLC8PatWvx3HPPoX379ggLC8M999
yD6dOnG481atQoFBQU4OOPP8aLL76IGjVq4N577/XcCyQi8gB2GyAi8gGSJGHFihW4++67vT0UIiKfxppXIiIiIvIbDF6JiIiIyG+w5pWIyAewgouIyD7MvBIRERGR32DwSkRERER+g8ErEREREfkNBq9ERERE5DcYvBIRERGR32DwSkRERER+g8ErEREREfkNBq9ERERE5Df+H3aSrg4NX3CzAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "\n", + "# 13) Plot loss and MAE curves\n", + "epochs = list(range(1, n_epochs + 1))\n", + "\n", + "plt.figure(figsize=(8, 4))\n", + "plt.plot(epochs, train_losses_epochs, marker='o', label='Train Loss')\n", + "plt.plot(epochs, test_losses_epochs, marker='o', label='Test Loss')\n", + "plt.xlabel('Epoch')\n", + "plt.ylabel('SmoothL1 Loss')\n", + "plt.title('Training vs. Validation Loss by Epoch')\n", + "plt.legend()\n", + "plt.grid(True)\n", + "plt.show()\n", + "\n", + "plt.figure(figsize=(8, 4))\n", + "plt.plot(epochs, mae_sums_epochs, marker='o', color='tab:orange', label='Val MAE (lat+lon)')\n", + "plt.xlabel('Epoch')\n", + "plt.ylabel('MAE Sum (cm)')\n", + "plt.title('Validation MAE (cm) by Epoch')\n", + "plt.legend()\n", + "plt.grid(True)\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ed39685e-23ee-4af8-a425-473d7f476273", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/references/.gitkeep b/references/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/reports/.gitkeep b/reports/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/reports/figures/.gitkeep b/reports/figures/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..d4f7d11 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,10 @@ +# local package +-e . 
+ +# external requirements +click +Sphinx +coverage +awscli +flake8 +python-dotenv>=0.5.1 diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..dc3d4c9 --- /dev/null +++ b/setup.py @@ -0,0 +1,10 @@ +from setuptools import find_packages, setup + +setup( + name='src', + packages=find_packages(), + version='0.1.0', + description='A short description of the project.', + author='Your name (or your organization/company/team)', + license='MIT', +) diff --git a/src/__init__.py b/src/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/data/.gitkeep b/src/data/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/src/data/__init__.py b/src/data/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/data/make_dataset.py b/src/data/make_dataset.py new file mode 100644 index 0000000..96b377a --- /dev/null +++ b/src/data/make_dataset.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +import click +import logging +from pathlib import Path +from dotenv import find_dotenv, load_dotenv + + +@click.command() +@click.argument('input_filepath', type=click.Path(exists=True)) +@click.argument('output_filepath', type=click.Path()) +def main(input_filepath, output_filepath): + """ Runs data processing scripts to turn raw data from (../raw) into + cleaned data ready to be analyzed (saved in ../processed). 
+ """ + logger = logging.getLogger(__name__) + logger.info('making final data set from raw data') + + +if __name__ == '__main__': + log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' + logging.basicConfig(level=logging.INFO, format=log_fmt) + + # not used in this stub but often useful for finding various files + project_dir = Path(__file__).resolve().parents[2] + + # find .env automagically by walking up directories until it's found, then + # load up the .env entries as environment variables + load_dotenv(find_dotenv()) + + main() diff --git a/src/features/.gitkeep b/src/features/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/src/features/__init__.py b/src/features/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/features/build_features.py b/src/features/build_features.py new file mode 100644 index 0000000..e69de29 diff --git a/src/models/.gitkeep b/src/models/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/src/models/__init__.py b/src/models/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/models/predict_model.py b/src/models/predict_model.py new file mode 100644 index 0000000..e69de29 diff --git a/src/models/train_model.py b/src/models/train_model.py new file mode 100644 index 0000000..e69de29 diff --git a/src/visualization/.gitkeep b/src/visualization/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/src/visualization/__init__.py b/src/visualization/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/visualization/visualize.py b/src/visualization/visualize.py new file mode 100644 index 0000000..e69de29 diff --git a/test_environment.py b/test_environment.py new file mode 100644 index 0000000..d0ac4a7 --- /dev/null +++ b/test_environment.py @@ -0,0 +1,25 @@ +import sys + +REQUIRED_PYTHON = "python3" + + +def main(): + system_major = sys.version_info.major + if REQUIRED_PYTHON == "python": + required_major = 2 + elif 
REQUIRED_PYTHON == "python3": + required_major = 3 + else: + raise ValueError("Unrecognized python interpreter: {}".format( + REQUIRED_PYTHON)) + + if system_major != required_major: + raise TypeError( + "This project requires Python {}. Found: Python {}".format( + required_major, sys.version)) + else: + print(">>> Development environment passes all tests!") + + +if __name__ == '__main__': + main() diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..c32fbd8 --- /dev/null +++ b/tox.ini @@ -0,0 +1,3 @@ +[flake8] +max-line-length = 79 +max-complexity = 10 From f4d5608c6fa30d8df234c927ca1fdf0998295279 Mon Sep 17 00:00:00 2001 From: IkaKes Date: Wed, 11 Jun 2025 12:57:45 +0000 Subject: [PATCH 02/11] requirements --- notebooks/test.ipynb | 370 ++++++++++++++++++------------------------- requirements.txt | 30 +++- 2 files changed, 182 insertions(+), 218 deletions(-) diff --git a/notebooks/test.ipynb b/notebooks/test.ipynb index d6d7628..8bf13f7 100644 --- a/notebooks/test.ipynb +++ b/notebooks/test.ipynb @@ -97,7 +97,7 @@ } ], "source": [ - "# 2) \n", + "# 2) GPU-enabled PyTorch 2.4.0 + CUDA 12.4\n", "!pip install \\\n", " torch==2.4.0+cu124 \\\n", " torchvision==0.19.0+cu124 \\\n", @@ -318,80 +318,61 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 28, "id": "86d9f908-4760-450c-9116-6720536c8842", "metadata": {}, "outputs": [], "source": [ - "# 1) Single measurement:\n", + "#1) Loading single measurement:\n", + "\n", "def load_and_process_single_measurement(sats_csv_path, receiver_csv_path):\n", - " \"\"\"\n", - " Reads satellite and receiver CSVs and constructs lists for:\n", - " - feature_dicts: [{ 'receiver': (1x2), 'satellite': (n_satx3) }, …]\n", - " - target_dicts: [{ 'receiver': (1x2) }, …]\n", - " - edge_index_dicts: [{ ('receiver','to','satellite'): (2xn_sat), ('satellite','rev_to','receiver'): (2xn_sat) }, …]\n", - " - edge_weight_dicts: [None, None, …]\n", - " - time_steps: [t0, t1, …]\n", - " - additional_sids: [{ 
'satellite_s_ids': { 'satellite': array(n_sat,) } }, …]\n", - " \"\"\"\n", - " sats_df_meas = pd.read_csv(sats_csv_path)\n", - " receiver_df_meas = pd.read_csv(receiver_csv_path)\n", - " time_steps_meas = sorted(receiver_df_meas['T_ID'].unique())\n", - " \n", - " feature_dicts_meas = []\n", - " target_dicts_meas = []\n", - " edge_index_dicts_meas = []\n", - " additional_sids_dicts = []\n", - " \n", - " for t_local in time_steps_meas:\n", - " rec = receiver_df_meas[receiver_df_meas['T_ID'] == t_local].iloc[0]\n", - " feat_rec = rec[['Lat', 'Lon']].to_numpy().reshape(1, 2)\n", - " targ_rec = rec[['LatDev', 'LonDev']].to_numpy().reshape(1, 2)\n", - " \n", - " sats_t = sats_df_meas[sats_df_meas['T_ID'] == t_local].sort_values('S_ID')\n", - " feat_sat = sats_t[['SNR', 'az', 'el']].to_numpy()\n", - " s_ids_sat = sats_t['S_ID'].values.astype(np.int64)\n", - " n_sat = feat_sat.shape[0]\n", - " \n", - " if n_sat > 0 and n_sat != len(s_ids_sat):\n", - " s_ids_sat = s_ids_sat[:n_sat]\n", - " elif n_sat == 0 and len(s_ids_sat) > 0:\n", - " s_ids_sat = np.array([], dtype=np.int64)\n", + " sats_df = pd.read_csv(sats_csv_path)\n", + " rec_df = pd.read_csv(receiver_csv_path)\n", + " t_ids = sorted(rec_df['T_ID'].unique())\n", + "\n", + " #lists to collect per-timestep data\n", + " feature_dicts = []\n", + " target_dicts = []\n", + " edge_index_dicts = []\n", + " satellite_id_dicts = []\n", + "\n", + " for t in t_ids:\n", + " #grabbing the recievers features and target values\n", + " rec_row = rec_df[rec_df['T_ID'] == t].iloc[0] \n", + " feat_rec = rec_row[['Lat', 'Lon']].to_numpy().reshape(1, 2)\n", + " targ_rec = rec_row[['LatDev', 'LonDev']].to_numpy().reshape(1, 2)\n", " \n", - " src = np.zeros(n_sat, dtype=int)\n", - " dst = np.arange(n_sat, dtype=int)\n", - " edges = np.vstack([src, dst])\n", - " edges_rev = edges[::-1].copy()\n", - " \n", - " feature_dicts_meas.append({\n", - " 'receiver': feat_rec,\n", + " #visible satellites at that timestep (features+IDs)\n", + " sats_t = 
sats_df[sats_df['T_ID'] == t].sort_values('S_ID')\n", + " n_sat = len(sats_t)\n", + " feat_sat = sats_t[['SNR', 'az', 'el']].to_numpy().reshape(n_sat, 3)\n", + " s_ids_sat = sats_t['S_ID'].values.astype(np.int64)\n", + "\n", + " src = np.zeros(n_sat, dtype=int)\n", + " dst = np.arange(n_sat, dtype=int)\n", + " edges = np.vstack([src, dst]) #building edges from the reciever to visible satellites\n", + " edges_rev = edges[::-1].copy() # revdrsing to get satellite-reciever edges\n", + "\n", + " feature_dicts.append({\n", + " 'receiver': feat_rec,\n", " 'satellite': feat_sat\n", " })\n", - " target_dicts_meas.append({\n", - " 'receiver': targ_rec\n", - " })\n", - " edge_index_dicts_meas.append({\n", - " ('receiver', 'to', 'satellite'): edges,\n", - " ('satellite', 'rev_to', 'receiver'): edges_rev\n", - " })\n", - " additional_sids_dicts.append({\n", - " 'satellite_s_ids': s_ids_sat\n", + " target_dicts.append({'receiver': targ_rec})\n", + " edge_index_dicts.append({\n", + " ('receiver','to','satellite'): edges,\n", + " ('satellite','rev_to','receiver'): edges_rev\n", " })\n", - " \n", - " edge_weight_dicts_meas = [None] * len(time_steps_meas)\n", - " return (\n", - " feature_dicts_meas,\n", - " target_dicts_meas,\n", - " edge_index_dicts_meas,\n", - " edge_weight_dicts_meas,\n", - " time_steps_meas,\n", - " additional_sids_dicts\n", - " )" + " satellite_id_dicts.append({'satellite_s_ids': s_ids_sat})\n", + "\n", + " edge_weight_dicts = [None] * len(feature_dicts)\n", + "\n", + "\n", + " return feature_dicts, target_dicts, edge_index_dicts, edge_weight_dicts, satellite_id_dicts" ] }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 30, "id": "b109c495-b981-4f23-9a3e-3de9c479d5c1", "metadata": {}, "outputs": [ @@ -399,115 +380,89 @@ "name": "stdout", "output_type": "stream", "text": [ - "DEBUG: Starting to load and process all measurements...\n", - "Ukupno mjerenja: 50\n", - " Za trening: 25 (IDs: ['R_0', 'R_1', 'R_2', 'R_3', 'R_4', 'R_5', 'R_6', 
'R_7', 'R_8', 'R_9', 'R_10', 'R_11', 'R_12', 'R_13', 'R_14', 'R_15', 'R_16', 'R_17', 'R_18', 'R_19', 'R_20', 'R_21', 'R_22', 'R_23', 'R_24'])\n", - " Za test: 25 (IDs: ['R_25', 'R_26', 'R_27', 'R_28', 'R_29', 'R_30', 'R_31', 'R_32', 'R_33', 'R_34', 'R_35', 'R_36', 'R_37', 'R_38', 'R_39', 'R_40', 'R_41', 'R_42', 'R_43', 'R_44', 'R_45', 'R_46', 'R_47', 'R_48', 'R_49'])\n" + "Ukupno učitanih mjerenja: 50\n", + " Za trening: 25 (IDs: ['R_0', 'R_1', 'R_2', 'R_3', 'R_4', 'R_5', 'R_6', 'R_7', 'R_8', 'R_9', 'R_10', 'R_11', 'R_12', 'R_13', 'R_14', 'R_15', 'R_16', 'R_17', 'R_18', 'R_19', 'R_20', 'R_21', 'R_22', 'R_23', 'R_24'])\n", + " Za test: 25 (IDs: ['R_25', 'R_26', 'R_27', 'R_28', 'R_29', 'R_30', 'R_31', 'R_32', 'R_33', 'R_34', 'R_35', 'R_36', 'R_37', 'R_38', 'R_39', 'R_40', 'R_41', 'R_42', 'R_43', 'R_44', 'R_45', 'R_46', 'R_47', 'R_48', 'R_49'])\n" ] } ], "source": [ - "# --- 1) Učitavanje i predobrada više mjerenja ---\n", - "print(\"DEBUG: Starting to load and process all measurements...\")\n", - "\n", - "base_server_path = \"/home/jovyan/shared/Ivana_GNN/Sateliti/parsed/Ublox10/cw/-65/\"\n", - "\n", - "# Programski generiramo definicije za R_0 … R_49\n", - "measurement_definitions = []\n", - "for i in range(50):\n", - " folder = f\"R_{i}\"\n", - " sats_path = os.path.join(base_server_path, folder, \"sats_data.csv\")\n", - " receiver_path = os.path.join(base_server_path, folder, \"reciever_data.csv\")\n", - " measurement_definitions.append({\n", - " \"id\": folder,\n", - " \"sats\": sats_path,\n", - " \"receiver\": receiver_path,\n", - " })\n", + "#2) Loading all measurements\n", + "\n", + "base_path = \"/home/jovyan/shared/Ivana_GNN/Sateliti/parsed/Ublox10/cw/-65/\"\n", + "\n", + "measurement_definitions = [\n", + " {\n", + " \"id\": f\"R_{i}\",\n", + " \"sats\": os.path.join(base_path, f\"R_{i}\", \"sats_data.csv\"),\n", + " \"receiver\": os.path.join(base_path, f\"R_{i}\", \"reciever_data.csv\"),\n", + " }\n", + " for i in range(50)\n", + "]\n", "\n", 
"all_measurements_processed = []\n", + "\n", "for m_info in measurement_definitions:\n", - " #print(f\"Učitavanje mjerenja: {m_info['id']} (Sats: {m_info['sats']}, Receiver: {m_info['receiver']})\")\n", - " if not (os.path.exists(m_info[\"sats\"]) and os.path.exists(m_info[\"receiver\"])):\n", - " print(f\" UPOZORENJE: Datoteke za {m_info['id']} ne postoje, preskačem.\")\n", + " sats_path = m_info[\"sats\"]\n", + " rec_path = m_info[\"receiver\"]\n", + "\n", + " if not (os.path.exists(sats_path) and os.path.exists(rec_path)):\n", + " print(f\" UPOZORENJE: Datoteke za {m_info['id']} ne postoje.\")\n", " continue\n", "\n", - " (\n", - " features,\n", - " targets,\n", - " edges,\n", - " weights,\n", - " times,\n", - " additional_sids_dicts\n", - " ) = load_and_process_single_measurement(\n", - " m_info[\"sats\"], m_info[\"receiver\"]\n", - " )\n", "\n", - " # Ako nema dovoljno snapshotova, preskoči\n", - " if features is None or len(features) < window_size:\n", - " print(f\" INFO: Premalo snimaka ({len(features) if features else 0}), preskačem.\")\n", - " continue\n", + " features, targets, edges, weights, additional_sids_dicts = \\\n", + " load_and_process_single_measurement(sats_path, rec_path)\n", "\n", " all_measurements_processed.append({\n", " \"id\": m_info[\"id\"],\n", - " \"features\": features,\n", - " \"targets\": targets,\n", - " \"edges\": edges,\n", - " \"weights\": weights,\n", - " \"time_steps\": times,\n", - " \"satellite_s_ids\": additional_sids_dicts\n", + " \"features\": features, # List[Dict{'receiver': (1×2), 'satellite': (n_sat×3)}]\n", + " \"targets\": targets, # List[Dict{'receiver': (1×2)}]\n", + " \"edges\": edges, # List[Dict{('receiver','to','satellite'): (2×n_sat), …}]\n", + " \"weights\": weights, # List[None], jedan None po timestampu\n", + " \"time_steps\": times, # List[int], T_ID vrijednosti po vremenu\n", + " \"satellite_s_ids\": additional_sids_dicts # List[Dict{'satellite_s_ids': array(n_sat,)}]\n", " })\n", "\n", "if not 
all_measurements_processed:\n", " raise ValueError(\n", - " \"Nijedno mjerenje nije učitano/obrađeno. Provjerite putanje do datoteka i sadržaj CSV-ova.\"\n", - " )\n", + " \"Nijedno mjerenje nije učitano.\")\n", "\n", - "# Podjela: prva 40 za trening, zadnjih 10 za test\n", + "# Change for different splits:\n", "train_measurements_data = all_measurements_processed[:25]\n", "test_measurements_data = all_measurements_processed[25:]\n", "\n", - "print(f\"Ukupno mjerenja: {len(all_measurements_processed)}\")\n", - "print(f\" Za trening: {len(train_measurements_data)} (IDs: {[m['id'] for m in train_measurements_data]})\")\n", - "print(f\" Za test: {len(test_measurements_data)} (IDs: {[m['id'] for m in test_measurements_data]})\")\n" + "print(f\"Ukupno ucitanih mjerenja: {len(all_measurements_processed)}\")\n", + "print(f\" Za trening: {len(train_measurements_data)} (IDs: {[m['id'] for m in train_measurements_data]})\")\n", + "print(f\" Za test: {len(test_measurements_data)} (IDs: {[m['id'] for m in test_measurements_data]})\")\n" ] }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 32, "id": "a76a33ac-5d37-4800-ad04-6c2693a34569", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "DEBUG: Aggregating raw train features/targets for StandardScaler...\n" - ] - } - ], + "outputs": [], "source": [ - "# 3) Aggregation for normalization\n", - "print(\"DEBUG: Aggregating raw train features/targets for StandardScaler...\")\n", + "# 3) Aggregation of the training data\n", "\n", - "agg_train_rec_feats = []\n", - "agg_train_sat_feats = []\n", - "agg_train_targ_rec = []\n", + "agg_train_rec_feats = [] \n", + "agg_train_sat_feats = [] \n", + "agg_train_targ_rec = [] \n", "\n", "for meas_data in train_measurements_data:\n", - " num_ts = len(meas_data[\"features\"])\n", - " for i in range(num_ts):\n", - " fr = meas_data[\"features\"][i]['receiver']\n", + " for feat_dict, targ_dict in zip(meas_data[\"features\"], 
meas_data[\"targets\"]):\n", + "\n", + " fr = feat_dict['receiver']\n", " agg_train_rec_feats.append(fr)\n", "\n", - " fs = meas_data[\"features\"][i]['satellite']\n", + " fs = feat_dict['satellite']\n", " if fs.size > 0:\n", " agg_train_sat_feats.append(fs)\n", "\n", - " tr = meas_data[\"targets\"][i]['receiver']\n", + " tr = targ_dict['receiver']\n", " agg_train_targ_rec.append(tr)\n", "\n", - "if not agg_train_rec_feats:\n", - " raise ValueError(\"No training data available for normalization statistics.\")\n", "\n", "rec_feats_np = np.vstack(agg_train_rec_feats)\n", "sat_feats_np = np.vstack(agg_train_sat_feats)\n", @@ -516,28 +471,19 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 33, "id": "6cdbe27f-561a-42e4-9478-2594c0b69f50", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "DEBUG: Fitted StandardScalers on train set.\n" - ] - } - ], + "outputs": [], "source": [ + "#4) Fit StandardScalers on TRAINING data \n", "\n", - "# 4) Fit StandardScalers on TRAINING data \n", "rec_scaler = StandardScaler().fit(rec_feats_np)\n", "targ_scaler = StandardScaler().fit(targ_rec_np)\n", "sat_scaler = StandardScaler().fit(sat_feats_np)\n", "\n", - "print(\"DEBUG: Fitted StandardScalers on train set.\")\n", "\n", - "# 5) Normalization function\n", + "#5) Normalization function\n", "def normalize_with_scalers(measurement_data_list, rec_scaler, sat_scaler, targ_scaler):\n", " normalized_measurements = []\n", " for meas_data in measurement_data_list:\n", @@ -582,27 +528,19 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 34, "id": "f239b3f0-9993-4318-a951-1e585c815e46", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "DEBUG: Finished StandardScaler normalization on train & test sets.\n" - ] - } - ], + "outputs": [], "source": [ - "# 6) Apply normalization to train & test\n", + "#6) Apply normalization to train & test\n", + "\n", 
"normalized_train_measurements = normalize_with_scalers(\n", " train_measurements_data, rec_scaler, sat_scaler, targ_scaler\n", ")\n", "normalized_test_measurements = normalize_with_scalers(\n", " test_measurements_data, rec_scaler, sat_scaler, targ_scaler\n", - ")\n", - "print(\"DEBUG: Finished StandardScaler normalization on train & test sets.\")\n" + ")\n" ] }, { @@ -621,10 +559,12 @@ } ], "source": [ - "# 7) Create DynamicHeteroGraphTemporalSignal objects\n", - "print(\"DEBUG: Creating DynamicHeteroGraphTemporalSignal objects...\")\n", + "#7) Create DynamicHeteroGraphTemporalSignal\n", "\n", "def create_signals(measurements, split_name):\n", + " '''\n", + " Creating HeteroData objects for each timstamp/snapshot \n", + " '''\n", " signals = []\n", " for meas_data in measurements:\n", " signal = DynamicHeteroGraphTemporalSignal(\n", @@ -638,15 +578,17 @@ " #print(f\"Created {split_name} signal {meas_data['id']} (snapshots: {signal.snapshot_count})\")\n", " return signals\n", "\n", + "# Create temporal signals for all normalized TRAIN measurements\n", "train_signals = create_signals(normalized_train_measurements, \"train\")\n", - "test_signals = create_signals(normalized_test_measurements, \"test\")\n", "\n", - "print(\"DEBUG: Finished creating DynamicHeteroGraphTemporalSignal objects.\")" + "# Create temporal signals for all normalized TEST measurements\n", + "test_signals = create_signals(normalized_test_measurements, \"test\")\n", + "\n" ] }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 35, "id": "9bbc6a86-db2d-492a-8b16-faf854c798a1", "metadata": {}, "outputs": [ @@ -654,16 +596,19 @@ "name": "stdout", "output_type": "stream", "text": [ - "DEBUG: Creating sliding window DataLoaders...\n", - "DEBUG: Total training steps: n_epochs=50, num_train_windows_total=700\n", - "DEBUG: Total training steps: n_epochs=50, num_test_windows_total=700\n", - "DEBUG: total_steps = 35000\n" + " Total training steps: n_epochs=50, 
num_train_windows_total=700\n", + " Total training steps: n_epochs=50, num_test_windows_total=700\n", + " total_steps = 35000\n" ] } ], "source": [ - "# 8) Sliding‐window Dataset & DataLoader\n", - "print(\"DEBUG: Creating sliding window DataLoaders...\")\n", + "#8) Sliding‐window Dataset & DataLoader\n", + "'''\n", + " Building a PyTorch Dataset that generates sliding windows containing HeteroData snapshots,\n", + " and both test and train loaders\n", + " \n", + "'''\n", "\n", "class SlidingWindowDataset(Dataset):\n", " def __init__(self, signal, window_size, stride=1):\n", @@ -696,9 +641,11 @@ " collate_fn=lambda batch: batch[0]\n", " )\n", "\n", - "# Choose stride window_size for non‐overlapping windows:\n", + "# Choose stride for (non‐)overlapping windows:\n", "train_loader = build_loader(train_signals, shuffle=True, stride=window_size)\n", "test_loader = build_loader(test_signals, shuffle=False, stride=window_size)\n", + "\n", + "\n", "'''\n", "if train_loader is None or len(train_loader.dataset) == 0:\n", " raise ValueError(\"No training windows after sliding‐window split.\")\n", @@ -709,46 +656,38 @@ "else:\n", " print(f\"DEBUG: Test DataLoader created. 
Total windows: {len(test_loader.dataset)}\")\n", "'''\n", + "\n", "num_train_windows_total = len(train_loader.dataset)\n", "num_test_windows_total = len(test_loader.dataset)\n", "\n", "total_steps = n_epochs * num_train_windows_total\n", - "print(f\"DEBUG: Total training steps: n_epochs={n_epochs}, num_train_windows_total={num_train_windows_total}\")\n", - "print(f\"DEBUG: Total training steps: n_epochs={n_epochs}, num_test_windows_total={num_test_windows_total}\")\n", "\n", - "print(f\"DEBUG: total_steps = {total_steps}\")" + "print(f\" Total training steps: n_epochs={n_epochs}, num_train_windows_total={num_train_windows_total}\")\n", + "print(f\" Total training steps: n_epochs={n_epochs}, num_test_windows_total={num_test_windows_total}\")\n", + "\n", + "print(f\" total_steps = {total_steps}\")" ] }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 38, "id": "d55c1f9f-042b-419b-ab45-09e643de25a0", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "DEBUG: Defining run_window function...\n", - "DEBUG: run_window defined.\n" - ] - } - ], + "outputs": [], "source": [ - "# 9) Define run_window function\n", - "print(\"DEBUG: Defining run_window function...\")\n", + "#9) Define function for iterating the GCLSTM cell through all snapshots in the window\n", "\n", "def run_window(window_snapshots, model, hidden_dim, device):\n", - " \"\"\"\n", - " window_Snapshots: list of HeteroData objects (length = window_size)\n", - " model: FullModel (contains GCLSTM + regression head)\n", + " '''\n", + " window_Snapshots: list of HeteroData objects \n", + " model: Model (defined in the cell below)\n", " hidden_dim: int\n", " device: torch.device\n", "\n", " Returns:\n", - " pred_norm: (1x2) tensor\n", - " true_norm: (1x2) tensor\n", - " \"\"\"\n", + " pred_norm: (1x2) tensor - normalized lat/lon predictions for last snapshot\n", + " true_norm: (1x2) tensor - ground truth for last snapshot\n", + " '''\n", " h_state = 
{'receiver': torch.zeros(hidden_dim, device=device)}\n", " c_state = {'receiver': torch.zeros(hidden_dim, device=device)}\n", "\n", @@ -762,7 +701,7 @@ " for rel in snapshot.edge_index_dict\n", " }\n", "\n", - " rec_h = h_state['receiver'].unsqueeze(0) # (1xhidden_dim)\n", + " rec_h = h_state['receiver'].unsqueeze(0) \n", " rec_c = c_state['receiver'].unsqueeze(0)\n", "\n", " s_ids_val = snapshot['satellite_s_ids']['satellite_s_ids']\n", @@ -790,25 +729,25 @@ " h_state[sid] = h_out['satellite'][j]\n", " c_state[sid] = c_out['satellite'][j]\n", "\n", - " h_final = h_state['receiver'].unsqueeze(0) # (1xhidden_dim)\n", + " h_final = h_state['receiver'].unsqueeze(0) \n", " h_dropped = model.dropout(h_final)\n", - " pred_norm = torch.cat([model.lin_lat(h_dropped), model.lin_lon(h_dropped)], dim=-1) # (1x2)\n", + " pred_norm = torch.cat([model.lin_lat(h_dropped), model.lin_lon(h_dropped)], dim=-1) \n", "\n", - " true_norm = window_snapshots[-1].y_dict['receiver'].to(device) # (1x2)\n", + " true_norm = window_snapshots[-1].y_dict['receiver'].to(device) \n", " return pred_norm, true_norm\n", - "\n", - "print(\"DEBUG: run_window defined.\")" + "\n" ] }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 39, "id": "9ac8fe60-3b1c-476e-9c92-0f0f1a5fd2b7", "metadata": {}, "outputs": [], "source": [ - "# 10) Define FullModel \n", - "class FullModel(nn.Module):\n", + "# 10) Define the Model \n", + "\n", + "class Model(nn.Module):\n", " def __init__(self, in_channels_dict, hidden_dim, metadata, dropout_rate=0.1):\n", " super().__init__()\n", " self.gclstm = HeteroGCLSTM(\n", @@ -822,15 +761,15 @@ "\n", " def forward(self, x_dict, edge_index_dict, h_dict=None, c_dict=None):\n", " h_out, c_out = self.gclstm(x_dict, edge_index_dict, h_dict, c_dict)\n", - " h_rec = h_out['receiver'] # (1xhidden_dim)\n", + " h_rec = h_out['receiver'] \n", " h_rec = self.dropout(h_rec)\n", - " coords = torch.cat([self.lin_lat(h_rec), self.lin_lon(h_rec)], dim=-1) # (1x2)\n", + " coords 
= torch.cat([self.lin_lat(h_rec), self.lin_lon(h_rec)], dim=-1) \n", " return coords, h_out, c_out\n" ] }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 40, "id": "d33440c3-4054-40d6-acc0-35fed184a15e", "metadata": {}, "outputs": [ @@ -838,20 +777,20 @@ "name": "stdout", "output_type": "stream", "text": [ - "DEBUG: Using device: cuda\n", - "DEBUG: Model, optimizer, loss_fn, and scheduler are set up.\n" + " Using device: cuda\n" ] } ], "source": [ "# 11) Initialize model, optimizer, loss, scheduler\n", + "\n", "if not train_signals or train_signals[0].snapshot_count == 0:\n", " raise ValueError(\"No training signals or first signal is empty for metadata.\")\n", "\n", "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", - "print(f\"DEBUG: Using device: {device}\")\n", + "print(f\" Using device: {device}\")\n", "\n", - "model = FullModel(\n", + "model = Model(\n", " in_channels_dict = {'receiver': 2, 'satellite': 3},\n", " hidden_dim = hidden_dim,\n", " metadata = train_signals[0][0].metadata()\n", @@ -871,8 +810,7 @@ " pct_start = pct_start_onecycle,\n", " anneal_strategy = 'cos'\n", ")\n", - "\n", - "print(\"DEBUG: Model, optimizer, loss_fn, and scheduler are set up.\")\n" + "\n" ] }, { @@ -1011,7 +949,7 @@ ], "source": [ "# 12) Training & evaluation loop\n", - "print(\"DEBUG: Starting training loop...\")\n", + "\n", "train_losses_epochs, test_losses_epochs, mae_sums_epochs = [], [], []\n", "best_test_loss = float('inf')\n", "best_epoch_metrics = {}\n", @@ -1030,7 +968,7 @@ " mae_sums_epochs.append(float('nan'))\n", " continue\n", "\n", - " # trening\n", + " # train\n", " for window_snapshots in train_loader:\n", " optimizer.zero_grad()\n", " pred_norm, true_norm = run_window(window_snapshots, model, hidden_dim, device)\n", @@ -1051,7 +989,7 @@ " avg_train_loss_epoch = total_train_loss_epoch / len(train_loader)\n", " train_losses_epochs.append(avg_train_loss_epoch)\n", "\n", - " # evaluacija\n", + " # eval\n", " if 
test_loader is not None and len(test_loader.dataset) > 0:\n", " model.eval()\n", " total_test_loss_epoch = 0.0\n", @@ -1101,7 +1039,7 @@ " 'mae_sum': mae_sum_epoch\n", " }\n", " best_model_state = copy.deepcopy(model.state_dict())\n", - " torch.save(best_model_state, 'best_full_model.pth')\n", + " torch.save(best_model_state, 'best_model.pth')\n", " print(f\" >> Best model updated.\")\n", " else:\n", " mae_sums_epochs.append(float('nan'))\n", @@ -1124,7 +1062,7 @@ " f\"Lon: {best_epoch_metrics['mae_lon']:.2f} cm \"\n", " f\"(sum: {best_epoch_metrics['mae_sum']:.2f} cm)\"\n", " )\n", - " print(\"Best model saved as 'best_full_model.pth'\")\n" + " print(\"Best model saved as 'best_model.pth'\")\n" ] }, { diff --git a/requirements.txt b/requirements.txt index d4f7d11..fe01299 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,10 +1,36 @@ -# local package +# —–– lokalni paket (editable install) -e . -# external requirements +--extra-index-url https://download.pytorch.org/whl/cu124 +--find-links https://data.pyg.org/whl/torch-2.4.0+cu124.html + +# —–– cookiecutter default external requirements: click Sphinx coverage awscli flake8 python-dotenv>=0.5.1 + +# —–– Data science & ML libraries: +numpy +scipy +pandas +matplotlib +scikit-learn + +# —–– PyTorch + CUDA build: +torch==2.4.0+cu124 +torchvision==0.19.0+cu124 +torchaudio==2.4.0+cu124 + +# —–– PyG low-level CUDA kerneli: +pyg_lib +torch_scatter==2.1.2 +torch_sparse==0.6.17 +torch_cluster==1.6.1 +torch_spline_conv==1.2.2 + +# —–– PyG high-level: +torch-geometric==2.3.1 +torch-geometric-temporal==0.54.0 From 6e31b9f1bacfbf278dfaa6d1b0b02ca48af85a92 Mon Sep 17 00:00:00 2001 From: IkaKes Date: Fri, 18 Jul 2025 14:31:42 +0000 Subject: [PATCH 03/11] New directory structure, with modularized code --- .gitignore | 61 +++-- LICENSE | 23 +- Makefile | 160 ++++-------- README.md | 62 ++++- {src/data => docs}/.gitkeep | 0 docs/Makefile | 153 ----------- docs/README.md | 12 + docs/commands.rst | 10 - docs/conf.py | 244 
------------------ .../getting-started.md} | 0 docs/docs/index.md | 10 + docs/index.rst | 24 -- docs/make.bat | 190 -------------- docs/mkdocs.yml | 4 + dvc.yaml | 25 ++ gnss/__init__.py | 1 + gnss/config.py | 28 ++ gnss/dataset.py | 162 ++++++++++++ gnss/features.py | 36 +++ gnss/model.py | 70 +++++ {src => gnss/modeling}/__init__.py | 0 gnss/modeling/train.py | 155 +++++++++++ gnss/plots.py | 32 +++ params.yaml | 41 +++ prepare_data.py | 221 ++++++++++++++++ pyproject.toml | 32 +++ requirements.txt | 51 ++-- run_all_experiments.py | 140 ++++++++++ run_experiment.py | 88 +++++++ setup.py | 10 - src/data/__init__.py | 0 src/data/make_dataset.py | 30 --- src/features/.gitkeep | 0 src/features/__init__.py | 0 src/features/build_features.py | 0 src/models/.gitkeep | 0 src/models/__init__.py | 0 src/models/predict_model.py | 0 src/models/train_model.py | 0 src/visualization/.gitkeep | 0 src/visualization/__init__.py | 0 src/visualization/visualize.py | 0 test_environment.py | 25 -- tox.ini | 3 - 44 files changed, 1236 insertions(+), 867 deletions(-) rename {src/data => docs}/.gitkeep (100%) delete mode 100644 docs/Makefile create mode 100644 docs/README.md delete mode 100644 docs/commands.rst delete mode 100644 docs/conf.py rename docs/{getting-started.rst => docs/getting-started.md} (100%) create mode 100644 docs/docs/index.md delete mode 100644 docs/index.rst delete mode 100644 docs/make.bat create mode 100644 docs/mkdocs.yml create mode 100644 dvc.yaml create mode 100644 gnss/__init__.py create mode 100644 gnss/config.py create mode 100644 gnss/dataset.py create mode 100644 gnss/features.py create mode 100644 gnss/model.py rename {src => gnss/modeling}/__init__.py (100%) create mode 100644 gnss/modeling/train.py create mode 100644 gnss/plots.py create mode 100644 params.yaml create mode 100644 prepare_data.py create mode 100644 pyproject.toml create mode 100644 run_all_experiments.py create mode 100644 run_experiment.py delete mode 100644 setup.py delete mode 100644 
src/data/__init__.py delete mode 100644 src/data/make_dataset.py delete mode 100644 src/features/.gitkeep delete mode 100644 src/features/__init__.py delete mode 100644 src/features/build_features.py delete mode 100644 src/models/.gitkeep delete mode 100644 src/models/__init__.py delete mode 100644 src/models/predict_model.py delete mode 100644 src/models/train_model.py delete mode 100644 src/visualization/.gitkeep delete mode 100644 src/visualization/__init__.py delete mode 100644 src/visualization/visualize.py delete mode 100644 test_environment.py delete mode 100644 tox.ini diff --git a/.gitignore b/.gitignore index 1532f1b..341a888 100644 --- a/.gitignore +++ b/.gitignore @@ -1,18 +1,3 @@ -### JupyterNotebooks ### -# gitignore template for Jupyter Notebooks -# website: http://jupyter.org/ - -.ipynb_checkpoints -*/.ipynb_checkpoints/* - -# IPython -profile_default/ -ipython_config.py - -# Remove previous ipynb_checkpoints -# git rm -r .ipynb_checkpoints/ - -### Linux ### *~ # temporary files which can be created if a process still has a handle open of a deleted file @@ -27,19 +12,6 @@ ipython_config.py # .nfs files are created when an open file is removed but is still being accessed .nfs* -### macOS ### -# General -.DS_Store -.AppleDouble -.LSOverride - -# Icon must end with two \r -Icon - - -# Thumbnails -._* - # Files that might appear in the root of a volume .DocumentRevisions-V100 .fseventsd @@ -274,3 +246,36 @@ $RECYCLE.BIN/ # Windows shortcuts *.lnk +# ================================================================ +# DVC, podaci i rezultati eksperimenata (NAJVAŽNIJE!) 
+# ================================================================ +/data/ +/reports/ +/models/ +/outputs/ +.dvc/cache +/dvclive/ +/dvc_plots/ +/DvcLiveLogger/ + +# ================================================================ +# Python & Jupyter specifične datoteke +# ================================================================ +__pycache__/ +*.py[cod] +*$py.class +.ipynb_checkpoints/ + +# ================================================================ +# Virtualni Environment +# ================================================================ +.venv/ +venv/ +env/ + +# ================================================================ +# OS / Editor specifične datoteke (korisno) +# ================================================================ +.DS_Store +*~ +.vscode/ diff --git a/LICENSE b/LICENSE index 50fe88d..b47709e 100644 --- a/LICENSE +++ b/LICENSE @@ -1,21 +1,10 @@ -MIT License -Copyright (c) 2025 SensorLab +The MIT License (MIT) +Copyright (c) 2025, Your name (or your organization/company/team) -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in all -copies or 
substantial portions of the Software. +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/Makefile b/Makefile index 71a2029..59e6586 100644 --- a/Makefile +++ b/Makefile @@ -1,86 +1,71 @@ -.PHONY: clean data lint requirements sync_data_to_s3 sync_data_from_s3 - ################################################################################# # GLOBALS # ################################################################################# -PROJECT_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST)))) -BUCKET = [OPTIONAL] your-bucket-for-syncing-data (do not include 's3://') -PROFILE = default -PROJECT_NAME = project_name -PYTHON_INTERPRETER = python3 - -ifeq (,$(shell which conda)) -HAS_CONDA=False -else -HAS_CONDA=True -endif +PROJECT_NAME = gnss +PYTHON_VERSION = 3.10 +PYTHON_INTERPRETER = python ################################################################################# # COMMANDS # ################################################################################# -## Install Python Dependencies -requirements: test_environment - $(PYTHON_INTERPRETER) -m pip install -U pip setuptools wheel + +## Install Python dependencies +.PHONY: requirements +requirements: + $(PYTHON_INTERPRETER) -m pip install -U pip $(PYTHON_INTERPRETER) -m pip install -r requirements.txt + + -## Make Dataset -data: requirements - $(PYTHON_INTERPRETER) src/data/make_dataset.py data/raw data/processed ## Delete all compiled Python files +.PHONY: clean clean: find . -type f -name "*.py[co]" -delete find . 
-type d -name "__pycache__" -delete -## Lint using flake8 + +## Lint using ruff (use `make format` to do formatting) +.PHONY: lint lint: - flake8 src - -## Upload Data to S3 -sync_data_to_s3: -ifeq (default,$(PROFILE)) - aws s3 sync data/ s3://$(BUCKET)/data/ -else - aws s3 sync data/ s3://$(BUCKET)/data/ --profile $(PROFILE) -endif - -## Download Data from S3 -sync_data_from_s3: -ifeq (default,$(PROFILE)) - aws s3 sync s3://$(BUCKET)/data/ data/ -else - aws s3 sync s3://$(BUCKET)/data/ data/ --profile $(PROFILE) -endif - -## Set up python interpreter environment + ruff format --check + ruff check + +## Format source code with ruff +.PHONY: format +format: + ruff check --fix + ruff format + + + +## Run tests +.PHONY: test +test: + python -m pytest tests + + +## Set up Python interpreter environment +.PHONY: create_environment create_environment: -ifeq (True,$(HAS_CONDA)) - @echo ">>> Detected conda, creating conda environment." -ifeq (3,$(findstring 3,$(PYTHON_INTERPRETER))) - conda create --name $(PROJECT_NAME) python=3 -else - conda create --name $(PROJECT_NAME) python=2.7 -endif - @echo ">>> New conda env created. Activate with:\nsource activate $(PROJECT_NAME)" -else - $(PYTHON_INTERPRETER) -m pip install -q virtualenv virtualenvwrapper - @echo ">>> Installing virtualenvwrapper if not already installed.\nMake sure the following lines are in shell startup file\n\ - export WORKON_HOME=$$HOME/.virtualenvs\nexport PROJECT_HOME=$$HOME/Devel\nsource /usr/local/bin/virtualenvwrapper.sh\n" - @bash -c "source `which virtualenvwrapper.sh`;mkvirtualenv $(PROJECT_NAME) --python=$(PYTHON_INTERPRETER)" + @bash -c "if [ ! -z `which virtualenvwrapper.sh` ]; then source `which virtualenvwrapper.sh`; mkvirtualenv $(PROJECT_NAME) --python=$(PYTHON_INTERPRETER); else mkvirtualenv.bat $(PROJECT_NAME) --python=$(PYTHON_INTERPRETER); fi" @echo ">>> New virtualenv created. 
Activate with:\nworkon $(PROJECT_NAME)" -endif + + -## Test python environment is setup correctly -test_environment: - $(PYTHON_INTERPRETER) test_environment.py ################################################################################# # PROJECT RULES # ################################################################################# +## Make dataset +.PHONY: data +data: requirements + $(PYTHON_INTERPRETER) gnss/dataset.py + ################################################################################# # Self Documenting Commands # @@ -88,57 +73,14 @@ test_environment: .DEFAULT_GOAL := help -# Inspired by -# sed script explained: -# /^##/: -# * save line in hold space -# * purge line -# * Loop: -# * append newline + line to hold space -# * go to next line -# * if line starts with doc comment, strip comment character off and loop -# * remove target prerequisites -# * append hold space (+ newline) to line -# * replace newline plus comments by `---` -# * print line -# Separate expressions are necessary because labels cannot be delimited by -# semicolon; see -.PHONY: help +define PRINT_HELP_PYSCRIPT +import re, sys; \ +lines = '\n'.join([line for line in sys.stdin]); \ +matches = re.findall(r'\n## (.*)\n[\s\S]+?\n([a-zA-Z_-]+):', lines); \ +print('Available rules:\n'); \ +print('\n'.join(['{:25}{}'.format(*reversed(match)) for match in matches])) +endef +export PRINT_HELP_PYSCRIPT + help: - @echo "$$(tput bold)Available rules:$$(tput sgr0)" - @echo - @sed -n -e "/^## / { \ - h; \ - s/.*//; \ - :doc" \ - -e "H; \ - n; \ - s/^## //; \ - t doc" \ - -e "s/:.*//; \ - G; \ - s/\\n## /---/; \ - s/\\n/ /g; \ - p; \ - }" ${MAKEFILE_LIST} \ - | LC_ALL='C' sort --ignore-case \ - | awk -F '---' \ - -v ncol=$$(tput cols) \ - -v indent=19 \ - -v col_on="$$(tput setaf 6)" \ - -v col_off="$$(tput sgr0)" \ - '{ \ - printf "%s%*s%s ", col_on, -indent, $$1, col_off; \ - n = split($$2, words, " "); \ - line_length = ncol - indent; \ - for (i = 1; i <= n; i++) { \ - line_length 
-= length(words[i]) + 1; \ - if (line_length <= 0) { \ - line_length = ncol - indent - length(words[i]) - 1; \ - printf "\n%*s ", -indent, " "; \ - } \ - printf "%s ", words[i]; \ - } \ - printf "\n"; \ - }' \ - | more $(shell test $(shell uname) = Darwin && echo '--no-init --raw-control-chars') + @$(PYTHON_INTERPRETER) -c "${PRINT_HELP_PYSCRIPT}" < $(MAKEFILE_LIST) diff --git a/README.md b/README.md index 8065e08..0614b01 100644 --- a/README.md +++ b/README.md @@ -1 +1,61 @@ -# GNSSGraphDetect \ No newline at end of file +# GNSS + + + + + +A short description of the project. + +## Project Organization + +``` +├── LICENSE <- Open-source license if one is chosen +├── Makefile <- Makefile with convenience commands like `make data` or `make train` +├── README.md <- The top-level README for developers using this project. +├── data +│ ├── external <- Data from third party sources. +│ ├── interim <- Intermediate data that has been transformed. +│ ├── processed <- The final, canonical data sets for modeling. +│ └── raw <- The original, immutable data dump. +│ +├── docs <- A default mkdocs project; see www.mkdocs.org for details +│ +├── models <- Trained and serialized models, model predictions, or model summaries +│ +├── notebooks <- Jupyter notebooks. Naming convention is a number (for ordering), +│ the creator's initials, and a short `-` delimited description, e.g. +│ `1.0-jqp-initial-data-exploration`. +│ +├── pyproject.toml <- Project configuration file with package metadata for +│ gnss and configuration for tools like black +│ +├── references <- Data dictionaries, manuals, and all other explanatory materials. +│ +├── reports <- Generated analysis as HTML, PDF, LaTeX, etc. +│ └── figures <- Generated graphics and figures to be used in reporting +│ +├── requirements.txt <- The requirements file for reproducing the analysis environment, e.g. 
+│ generated with `pip freeze > requirements.txt` +│ +├── setup.cfg <- Configuration file for flake8 +│ +└── gnss <- Source code for use in this project. + │ + ├── __init__.py <- Makes gnss a Python module + │ + ├── config.py <- Store useful variables and configuration + │ + ├── dataset.py <- Scripts to download or generate data + │ + ├── features.py <- Code to create features for modeling + │ + ├── modeling + │ ├── __init__.py + │ ├── predict.py <- Code to run model inference with trained models + │ └── train.py <- Code to train models + │ + └── plots.py <- Code to create visualizations +``` + +-------- + diff --git a/src/data/.gitkeep b/docs/.gitkeep similarity index 100% rename from src/data/.gitkeep rename to docs/.gitkeep diff --git a/docs/Makefile b/docs/Makefile deleted file mode 100644 index 56a5c29..0000000 --- a/docs/Makefile +++ /dev/null @@ -1,153 +0,0 @@ -# Makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -PAPER = -BUILDDIR = _build - -# Internal variables. -PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . -# the i18n builder cannot share the environment and doctrees with the others -I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
- -.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext - -help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " singlehtml to make a single large HTML file" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " devhelp to make HTML files and a Devhelp project" - @echo " epub to make an epub" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " latexpdf to make LaTeX files and run them through pdflatex" - @echo " text to make text files" - @echo " man to make manual pages" - @echo " texinfo to make Texinfo files" - @echo " info to make Texinfo files and run them through makeinfo" - @echo " gettext to make PO message catalogs" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - -clean: - -rm -rf $(BUILDDIR)/* - -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -singlehtml: - $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml - @echo - @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." - -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." 
- -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." - -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/project_name.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/project_name.qhc" - -devhelp: - $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." - @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/project_name" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/project_name" - @echo "# devhelp" - -epub: - $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." - -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make' in that directory to run these through (pdf)latex" \ - "(use \`make latexpdf' here to do that automatically)." - -latexpdf: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -text: - $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." - -man: - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 
- -texinfo: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo - @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." - @echo "Run \`make' in that directory to run these through makeinfo" \ - "(use \`make info' here to do that automatically)." - -info: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo "Running Texinfo files through makeinfo..." - make -C $(BUILDDIR)/texinfo info - @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." - -gettext: - $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale - @echo - @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." - -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." - -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." - -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 0000000..79c1468 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,12 @@ +Generating the docs +---------- + +Use [mkdocs](http://www.mkdocs.org/) structure to update the documentation. + +Build locally with: + + mkdocs build + +Serve locally with: + + mkdocs serve diff --git a/docs/commands.rst b/docs/commands.rst deleted file mode 100644 index 2d162f3..0000000 --- a/docs/commands.rst +++ /dev/null @@ -1,10 +0,0 @@ -Commands -======== - -The Makefile contains the central entry points for common tasks related to this project. 
- -Syncing data to S3 -^^^^^^^^^^^^^^^^^^ - -* `make sync_data_to_s3` will use `aws s3 sync` to recursively sync files in `data/` up to `s3://[OPTIONAL] your-bucket-for-syncing-data (do not include 's3://')/data/`. -* `make sync_data_from_s3` will use `aws s3 sync` to recursively sync files from `s3://[OPTIONAL] your-bucket-for-syncing-data (do not include 's3://')/data/` to `data/`. diff --git a/docs/conf.py b/docs/conf.py deleted file mode 100644 index 87b240c..0000000 --- a/docs/conf.py +++ /dev/null @@ -1,244 +0,0 @@ -# -*- coding: utf-8 -*- -# -# project_name documentation build configuration file, created by -# sphinx-quickstart. -# -# This file is execfile()d with the current directory set to its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import os -import sys - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ----------------------------------------------------- - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be extensions -# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = [] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. 
-project = u'project_name' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = '0.1' -# The full version, including alpha/beta/rc tags. -release = '0.1' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ['_build'] - -# The reST default role (used for this markup: `text`) to use for all documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - - -# -- Options for HTML output --------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'default' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. 
-# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 
-# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'project_namedoc' - - -# -- Options for LaTeX output -------------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # 'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass [howto/manual]). -latex_documents = [ - ('index', - 'project_name.tex', - u'project_name Documentation', - u"Your name (or your organization/company/team)", 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output -------------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). 
-man_pages = [ - ('index', 'project_name', u'project_name Documentation', - [u"Your name (or your organization/company/team)"], 1) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------------ - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'project_name', u'project_name Documentation', - u"Your name (or your organization/company/team)", 'project_name', - 'A short description of the project.', 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' diff --git a/docs/getting-started.rst b/docs/docs/getting-started.md similarity index 100% rename from docs/getting-started.rst rename to docs/docs/getting-started.md diff --git a/docs/docs/index.md b/docs/docs/index.md new file mode 100644 index 0000000..6ea5743 --- /dev/null +++ b/docs/docs/index.md @@ -0,0 +1,10 @@ +# GNSS documentation! + +## Description + +A short description of the project. + +## Commands + +The Makefile contains the central entry points for common tasks related to this project. + diff --git a/docs/index.rst b/docs/index.rst deleted file mode 100644 index 3302c62..0000000 --- a/docs/index.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. project_name documentation master file, created by - sphinx-quickstart. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -project_name documentation! -============================================== - -Contents: - -.. 
toctree:: - :maxdepth: 2 - - getting-started - commands - - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/docs/make.bat b/docs/make.bat deleted file mode 100644 index b9cc86d..0000000 --- a/docs/make.bat +++ /dev/null @@ -1,190 +0,0 @@ -@ECHO OFF - -REM Command file for Sphinx documentation - -if "%SPHINXBUILD%" == "" ( - set SPHINXBUILD=sphinx-build -) -set BUILDDIR=_build -set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . -set I18NSPHINXOPTS=%SPHINXOPTS% . -if NOT "%PAPER%" == "" ( - set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% - set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% -) - -if "%1" == "" goto help - -if "%1" == "help" ( - :help - echo.Please use `make ^` where ^ is one of - echo. html to make standalone HTML files - echo. dirhtml to make HTML files named index.html in directories - echo. singlehtml to make a single large HTML file - echo. pickle to make pickle files - echo. json to make JSON files - echo. htmlhelp to make HTML files and a HTML help project - echo. qthelp to make HTML files and a qthelp project - echo. devhelp to make HTML files and a Devhelp project - echo. epub to make an epub - echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter - echo. text to make text files - echo. man to make manual pages - echo. texinfo to make Texinfo files - echo. gettext to make PO message catalogs - echo. changes to make an overview over all changed/added/deprecated items - echo. linkcheck to check all external links for integrity - echo. doctest to run all doctests embedded in the documentation if enabled - goto end -) - -if "%1" == "clean" ( - for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i - del /q /s %BUILDDIR%\* - goto end -) - -if "%1" == "html" ( - %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/html. 
- goto end -) - -if "%1" == "dirhtml" ( - %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. - goto end -) - -if "%1" == "singlehtml" ( - %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. - goto end -) - -if "%1" == "pickle" ( - %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can process the pickle files. - goto end -) - -if "%1" == "json" ( - %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can process the JSON files. - goto end -) - -if "%1" == "htmlhelp" ( - %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can run HTML Help Workshop with the ^ -.hhp project file in %BUILDDIR%/htmlhelp. - goto end -) - -if "%1" == "qthelp" ( - %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can run "qcollectiongenerator" with the ^ -.qhcp project file in %BUILDDIR%/qthelp, like this: - echo.^> qcollectiongenerator %BUILDDIR%\qthelp\project_name.qhcp - echo.To view the help file: - echo.^> assistant -collectionFile %BUILDDIR%\qthelp\project_name.ghc - goto end -) - -if "%1" == "devhelp" ( - %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. - goto end -) - -if "%1" == "epub" ( - %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The epub file is in %BUILDDIR%/epub. - goto end -) - -if "%1" == "latex" ( - %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex - if errorlevel 1 exit /b 1 - echo. 
- echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. - goto end -) - -if "%1" == "text" ( - %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The text files are in %BUILDDIR%/text. - goto end -) - -if "%1" == "man" ( - %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The manual pages are in %BUILDDIR%/man. - goto end -) - -if "%1" == "texinfo" ( - %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. - goto end -) - -if "%1" == "gettext" ( - %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The message catalogs are in %BUILDDIR%/locale. - goto end -) - -if "%1" == "changes" ( - %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes - if errorlevel 1 exit /b 1 - echo. - echo.The overview file is in %BUILDDIR%/changes. - goto end -) - -if "%1" == "linkcheck" ( - %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck - if errorlevel 1 exit /b 1 - echo. - echo.Link check complete; look for any errors in the above output ^ -or in %BUILDDIR%/linkcheck/output.txt. - goto end -) - -if "%1" == "doctest" ( - %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest - if errorlevel 1 exit /b 1 - echo. - echo.Testing of doctests in the sources finished, look at the ^ -results in %BUILDDIR%/doctest/output.txt. 
- goto end -) - -:end diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml new file mode 100644 index 0000000..8076416 --- /dev/null +++ b/docs/mkdocs.yml @@ -0,0 +1,4 @@ +site_name: GNSS +# +site_author: Your name (or your organization/company/team) +# \ No newline at end of file diff --git a/dvc.yaml b/dvc.yaml new file mode 100644 index 0000000..436e106 --- /dev/null +++ b/dvc.yaml @@ -0,0 +1,25 @@ +stages: + prepare_data: + cmd: python prepare_data.py + deps: + - prepare_data.py + - params.yaml + - gnss/ + outs: + - ${output_dir} + train: + cmd: python run_experiment.py + deps: + - run_experiment.py + - gnss/ + - params.yaml + - ${output_dir} + outs: + #- ${train.output_dir}/best_full_model_lightning.pth + - ${train.output_dir}/best_model.ckpt + - ${train.output_dir}/dvclive/metrics.json: + cache: false + - ${train.output_dir}/dvclive/plots: + cache: false + metrics: + - ${train.output_dir}/metrics.yaml diff --git a/gnss/__init__.py b/gnss/__init__.py new file mode 100644 index 0000000..44a1789 --- /dev/null +++ b/gnss/__init__.py @@ -0,0 +1 @@ +from gnss import config # noqa: F401 diff --git a/gnss/config.py b/gnss/config.py new file mode 100644 index 0000000..1abed2a --- /dev/null +++ b/gnss/config.py @@ -0,0 +1,28 @@ +from pathlib import Path +from dotenv import load_dotenv +from loguru import logger +import random +import numpy as np +import torch + +# Paths +PROJ_ROOT = Path(__file__).resolve().parents[1] +logger.info(f"PROJ_ROOT path is: {PROJ_ROOT}") + +DATA_DIR = PROJ_ROOT / "data" +PARSED_DATA_DIR = DATA_DIR / "parsed" +RAW_DATA_DIR = DATA_DIR / "raw" +PROCESSED_DATA_DIR = DATA_DIR / "processed" +MODELS_DIR = PROJ_ROOT / "models" +REPORTS_DIR = PROJ_ROOT / "reports" +FIGURES_DIR = REPORTS_DIR / "figures" + +# If tqdm is installed, configure loguru with tqdm.write +try: + from tqdm import tqdm + logger.remove(0) + logger.add(lambda msg: tqdm.write(msg, end=""), colorize=True) +except ModuleNotFoundError: + pass + +window_size = 10 \ No newline at end of file 
# ---------------------------------------------------------------------------
# 1) Single measurement loader
# ---------------------------------------------------------------------------

def load_and_process_single_measurement(sats_csv_path, receiver_csv_path):
    """Load one measurement (satellite + receiver CSVs) into per-time-step dicts.

    Parameters
    ----------
    sats_csv_path : str or Path
        CSV of per-satellite observations with columns
        ``T_ID, S_ID, SNR, az, el``.
    receiver_csv_path : str or Path
        CSV of per-receiver rows with columns
        ``T_ID, Lat, Lon, LatDev, LonDev``.

    Returns
    -------
    tuple
        ``(feature_dicts, target_dicts, edge_index_dicts, edge_weight_dicts,
        time_steps, additional_sids_dicts)`` — each list holds one entry per
        time step (time steps are the sorted unique receiver ``T_ID`` values).
        Edge weights are always ``None``: the star graph is unweighted.
    """
    sats_df = pd.read_csv(sats_csv_path)
    receiver_df = pd.read_csv(receiver_csv_path)
    # The receiver CSV defines the timeline; satellite rows are matched onto it.
    time_steps = sorted(receiver_df['T_ID'].unique())

    feature_dicts = []
    target_dicts = []
    edge_index_dicts = []
    additional_sids_dicts = []

    for t_local in time_steps:
        # Exactly one receiver row is expected per time step; take the first.
        rec = receiver_df[receiver_df['T_ID'] == t_local].iloc[0]
        feat_rec = rec[['Lat', 'Lon']].to_numpy().reshape(1, 2)
        targ_rec = rec[['LatDev', 'LonDev']].to_numpy().reshape(1, 2)

        # Satellites visible at this time step, in deterministic S_ID order.
        sats_t = sats_df[sats_df['T_ID'] == t_local].sort_values('S_ID')
        feat_sat = sats_t[['SNR', 'az', 'el']].to_numpy()
        s_ids_sat = sats_t['S_ID'].values.astype(np.int64)
        n_sat = feat_sat.shape[0]

        # Star topology: the single receiver (node 0) links to every satellite.
        src = np.zeros(n_sat, dtype=int)
        dst = np.arange(n_sat, dtype=int)
        edges = np.vstack([src, dst])
        edges_rev = edges[::-1].copy()  # reversed rows for the rev_to relation

        feature_dicts.append({'receiver': feat_rec, 'satellite': feat_sat})
        target_dicts.append({'receiver': targ_rec})
        edge_index_dicts.append({
            ('receiver', 'to', 'satellite'): edges,
            ('satellite', 'rev_to', 'receiver'): edges_rev,
        })
        additional_sids_dicts.append({'satellite_s_ids': s_ids_sat})

    # Unweighted graph: one None placeholder per time step.
    edge_weight_dicts = [None] * len(time_steps)
    return (
        feature_dicts,
        target_dicts,
        edge_index_dicts,
        edge_weight_dicts,
        time_steps,
        additional_sids_dicts,
    )

# ---------------------------------------------------------------------------
# 2) Load and preprocess all measurements
# ---------------------------------------------------------------------------

def load_all_measurements(measurement_files):
    """Load every measurement described by *measurement_files*.

    Parameters
    ----------
    measurement_files : iterable of dict
        Each entry must provide the keys ``"id"``, ``"sats"`` and
        ``"receiver"`` (the latter two are CSV paths).

    Returns
    -------
    list of dict
        One dict per measurement with keys ``id, features, targets, edges,
        weights, time_steps, satellite_s_ids``.
    """
    processed = []
    for m_info in measurement_files:
        features, targets, edges, weights, times, sids_per_ts = (
            load_and_process_single_measurement(
                m_info["sats"],
                m_info["receiver"],
            )
        )
        processed.append({
            "id": m_info["id"],
            "features": features,
            "targets": targets,
            "edges": edges,
            "weights": weights,
            "time_steps": times,
            "satellite_s_ids": sids_per_ts,
        })
    return processed

# ---------------------------------------------------------------------------
# 3) Aggregation for normalization
# ---------------------------------------------------------------------------

def aggregate_for_normalization(train_measurements_data):
    """Collect the per-time-step arrays needed to fit normalization scalers.

    Returns
    -------
    tuple of list
        ``(receiver_features, satellite_features, receiver_targets)`` —
        lists of 2-D arrays.  Time steps with an empty satellite feature
        array (no visible satellites) are skipped for the satellite list.
    """
    rec_feats, sat_feats, rec_targs = [], [], []
    for meas_data in train_measurements_data:
        for feat_dict, targ_dict in zip(meas_data["features"], meas_data["targets"]):
            rec_feats.append(feat_dict['receiver'])
            sat = feat_dict['satellite']
            if sat.size > 0:
                sat_feats.append(sat)
            rec_targs.append(targ_dict['receiver'])
    return rec_feats, sat_feats, rec_targs

# ---------------------------------------------------------------------------
# 4) Fit StandardScalers
# ---------------------------------------------------------------------------

def fit_standard_scalers(rec_feats_np, sat_feats_np, targ_rec_np):
    """Fit one StandardScaler per feature group.

    Inputs are expected to be 2-D (samples x features) arrays — presumably the
    vertically stacked outputs of :func:`aggregate_for_normalization`;
    TODO(review): confirm the caller stacks before calling.

    Returns
    -------
    tuple
        ``(rec_scaler, sat_scaler, targ_scaler)`` — note the return order
        differs from the argument order.
    """
    rec_scaler = StandardScaler().fit(rec_feats_np)
    targ_scaler = StandardScaler().fit(targ_rec_np)
    sat_scaler = StandardScaler().fit(sat_feats_np)
    return rec_scaler, sat_scaler, targ_scaler

# ---------------------------------------------------------------------------
# 5) Build temporal graph signals
# ---------------------------------------------------------------------------

def create_signals(measurements):
    """Wrap each processed measurement in a DynamicHeteroGraphTemporalSignal.

    The per-time-step satellite id dicts travel along as the extra
    ``satellite_s_ids`` attribute of the signal.
    """
    signals = []
    for meas_data in measurements:
        signals.append(
            DynamicHeteroGraphTemporalSignal(
                edge_index_dicts=meas_data["edges"],
                edge_weight_dicts=meas_data["weights"],
                feature_dicts=meas_data["features"],
                target_dicts=meas_data["targets"],
                satellite_s_ids=meas_data["satellite_s_ids"],
            )
        )
    return signals
SlidingWindowDataset & build_loader: + +class SlidingWindowDataset(Dataset): + def __init__(self, signal, window_size, stride=1): + self.signal = signal + self.window_size = window_size + self.stride = stride + def __len__(self): + return max(0, (self.signal.snapshot_count - self.window_size) // self.stride + 1) + def __getitem__(self, idx): + start = idx * self.stride + end = start + self.window_size + return [self.signal[t] for t in range(start, end)] + +def build_loader(signals, window_size, shuffle, stride=1): + datasets = [] + for sig in signals: + ds = SlidingWindowDataset(sig, window_size, stride=stride) + if len(ds) > 0: + datasets.append(ds) + if not datasets: + return None + concat = ConcatDataset(datasets) + return DataLoader( + concat, + batch_size=1, + shuffle=shuffle, + collate_fn=lambda batch: batch[0] + ) + diff --git a/gnss/features.py b/gnss/features.py new file mode 100644 index 0000000..4430045 --- /dev/null +++ b/gnss/features.py @@ -0,0 +1,36 @@ +import numpy as np + +def normalize_with_scalers(measurement_data_list, rec_scaler, sat_scaler, targ_scaler): + normalized_measurements = [] + for meas_data in measurement_data_list: + norm_feat_dicts = [] + norm_targ_dicts = [] + norm_sids_list = [] + num_ts = len(meas_data["features"]) + for i in range(num_ts): + fr = meas_data["features"][i]['receiver'] + fs = meas_data["features"][i]['satellite'] + sids = meas_data["satellite_s_ids"][i]['satellite_s_ids'] + tr = meas_data["targets"][i]['receiver'] + norm_fr = rec_scaler.transform(fr) + norm_tr = targ_scaler.transform(tr) + if fs.size > 0: + norm_fs = sat_scaler.transform(fs) + else: + norm_fs = fs.copy() + norm_feat_dicts.append({ + 'receiver': norm_fr, + 'satellite': norm_fs + }) + norm_targ_dicts.append({ + 'receiver': norm_tr + }) + norm_sids_list.append({'satellite_s_ids': sids.copy()}) + new_meas = { + **meas_data, + "features": norm_feat_dicts, + "targets": norm_targ_dicts, + "satellite_s_ids": norm_sids_list + } + 
normalized_measurements.append(new_meas) + return normalized_measurements diff --git a/gnss/model.py b/gnss/model.py new file mode 100644 index 0000000..92cc444 --- /dev/null +++ b/gnss/model.py @@ -0,0 +1,70 @@ +import torch +import torch.nn as nn +import numpy as np +from torch_geometric_temporal.nn.hetero import HeteroGCLSTM + + +class FullModel(nn.Module): + + def __init__(self, in_channels_dict, hidden_dim, metadata, dropout_rate=0.1): + super().__init__() + + self.gclstm = HeteroGCLSTM( + in_channels_dict=in_channels_dict, + out_channels=hidden_dim, + metadata=metadata + ) + + self.dropout = nn.Dropout(dropout_rate) + + self.linear_out = nn.Linear(hidden_dim, 2) + + def forward(self, window_snapshots, device): + + hidden_dim = self.linear_out.in_features + + + h_state = {'receiver': torch.zeros(hidden_dim, device=device)} + c_state = {'receiver': torch.zeros(hidden_dim, device=device)} + + for snapshot in window_snapshots[:-1]: + x_dict_for_gclstm = snapshot.x_dict + eidx_on_device = snapshot.edge_index_dict + + s_ids_val = snapshot['satellite_s_ids']['satellite_s_ids'] + + + num_sat = s_ids_val.shape[0] + + h_sat = torch.zeros((num_sat, hidden_dim), device=device) + c_sat = torch.zeros((num_sat, hidden_dim), device=device) + + rec_h = h_state['receiver'].unsqueeze(0) + rec_c = c_state['receiver'].unsqueeze(0) + + for j, sid_tensor in enumerate(s_ids_val): + sid_key = sid_tensor.item() + if sid_key in h_state: + h_sat[j] = h_state[sid_key] + c_sat[j] = c_state[sid_key] + + h_dict_step = {'receiver': rec_h, 'satellite': h_sat} + c_dict_step = {'receiver': rec_c, 'satellite': c_sat} + + h_out, c_out = self.gclstm(x_dict_for_gclstm, eidx_on_device, h_dict_step, c_dict_step) + + h_state['receiver'] = h_out['receiver'][0] + c_state['receiver'] = c_out['receiver'][0] + + for j, sid_tensor in enumerate(s_ids_val): + sid_key = sid_tensor.item() + h_state[sid_key] = h_out['satellite'][j] + c_state[sid_key] = c_out['satellite'][j] + + h_final = 
h_state['receiver'].unsqueeze(0) + h_dropped = self.dropout(h_final) + pred_norm = self.linear_out(h_dropped) + true_norm = window_snapshots[-1].y_dict['receiver'] + + return pred_norm, true_norm + diff --git a/src/__init__.py b/gnss/modeling/__init__.py similarity index 100% rename from src/__init__.py rename to gnss/modeling/__init__.py diff --git a/gnss/modeling/train.py b/gnss/modeling/train.py new file mode 100644 index 0000000..5c4e8c1 --- /dev/null +++ b/gnss/modeling/train.py @@ -0,0 +1,155 @@ +import torch +import pytorch_lightning as pl +import torch.nn as nn +import numpy as np +from dvclive.lightning import DVCLiveLogger +from gnss.dataset import build_loader +from torch.utils.data import DataLoader +from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint +import os +import pandas as pd + +from gnss.model import FullModel +from gnss.model2 import FullModelTwoLayer + + +class LightningFullModel(pl.LightningModule): + + def __init__(self, model_class, in_channels_dict, hidden_dim, metadata, + scalers_tv, scalers_test, + initial_lr, weight_decay_val): + super().__init__() + self.save_hyperparameters('hidden_dim', 'initial_lr', 'weight_decay_val') + self.model = model_class(in_channels_dict, hidden_dim, metadata) + self.loss_fn = nn.SmoothL1Loss(beta=1e-2) + + self.scalers_for_train_val = scalers_tv + self.scalers_for_test = scalers_test + + def forward(self, window_snapshots): + return self.model(window_snapshots, device=self.device) + + def _calculate_loss(self, pred_norm, true_norm): + lat_p, lon_p = pred_norm[:, 0], pred_norm[:, 1] + lat_t, lon_t = true_norm[:, 0], true_norm[:, 1] + loss_lat = self.loss_fn(lat_p.unsqueeze(1), lat_t.unsqueeze(1)) + loss_lon = self.loss_fn(lon_p.unsqueeze(1), lon_t.unsqueeze(1)) + return loss_lat + loss_lon + + def training_step(self, batch, batch_idx): + pred_norm, true_norm = self(batch) + loss = self._calculate_loss(pred_norm, true_norm) + self.log('train_loss', loss, on_step=False, on_epoch=True, 
prog_bar=True, batch_size=1) + return loss + + def validation_step(self, batch, batch_idx): + pred_norm, true_norm = self(batch) + loss = self._calculate_loss(pred_norm, true_norm) + self.log('val_loss', loss, on_epoch=True, prog_bar=True, batch_size=1) + + targ_scaler = self.scalers_for_train_val['targ'] + pred_cm = targ_scaler.inverse_transform(pred_norm.cpu().numpy()) + true_cm = targ_scaler.inverse_transform(true_norm.cpu().numpy()) + + mae_lat = np.abs(pred_cm[:, 0] - true_cm[:, 0]).mean() + mae_lon = np.abs(pred_cm[:, 1] - true_cm[:, 1]).mean() + diffs = np.sqrt((pred_cm[:, 0] - true_cm[:, 0])**2 + (pred_cm[:, 1] - true_cm[:, 1])**2) + mae_sum = diffs.mean() + + self.log('val_mae_lat_cm', mae_lat, on_epoch=True, prog_bar=False, batch_size=1) + self.log('val_mae_lon_cm', mae_lon, on_epoch=True, prog_bar=False, batch_size=1) + self.log('val_mae_sum_cm', mae_sum, on_epoch=True, prog_bar=True, batch_size=1) + return loss + + def test_step(self, batch, batch_idx): + pred_norm, true_norm = self(batch) + loss = self._calculate_loss(pred_norm, true_norm) + self.log('test_loss', loss, on_epoch=True, prog_bar=True, batch_size=1) + + targ_scaler = self.scalers_for_test['targ'] + pred_cm = targ_scaler.inverse_transform(pred_norm.cpu().numpy()) + true_cm = targ_scaler.inverse_transform(true_norm.cpu().numpy()) + + mae_lat = np.abs(pred_cm[:, 0] - true_cm[:, 0]).mean() + mae_lon = np.abs(pred_cm[:, 1] - true_cm[:, 1]).mean() + diffs = np.sqrt((pred_cm[:, 0] - true_cm[:, 0])**2 + (pred_cm[:, 1] - true_cm[:, 1])**2) + mae_sum = diffs.mean() + + self.log('test_mae_lat_cm', mae_lat, on_epoch=True, prog_bar=False, batch_size=1) + self.log('test_mae_lon_cm', mae_lon, on_epoch=True, prog_bar=False, batch_size=1) + self.log('test_mae_sum_cm', mae_sum, on_epoch=True, prog_bar=True, batch_size=1) + return loss + + def configure_optimizers(self): + optimizer = torch.optim.Adam(self.parameters(), lr=self.hparams.initial_lr, weight_decay=self.hparams.weight_decay_val) + return 
optimizer + + + +def train_lightning_model( + train_signals, val_signals, test_signals, + scalers_for_train_val, scalers_for_test, + metadata, config +): + prepare_cfg = config['prepare_data'] + train_cfg = config['train'] + + window_size = prepare_cfg['window_size'] + stride = prepare_cfg['stride'] + + train_loader = build_loader(train_signals, window_size, shuffle=True, stride=stride) + val_loader = build_loader(val_signals, window_size, shuffle=False, stride=stride) + test_loader = build_loader(test_signals, window_size, shuffle=False, stride=stride) + + if train_cfg['model_name'] == 'FullModel': + model_class_to_use = FullModel + else: + model_class_to_use = FullModelTwoLayer + + model = LightningFullModel( + model_class=model_class_to_use, + in_channels_dict={'receiver': 2, 'satellite': 3}, + hidden_dim=train_cfg['hidden_dim'], + metadata=metadata, + scalers_tv=scalers_for_train_val, + scalers_test=scalers_for_test, + initial_lr=train_cfg['initial_lr'], + weight_decay_val=train_cfg['weight_decay_val'] + ) + + early_stop_callback = EarlyStopping(**train_cfg['early_stopping']) + + + checkpoint_callback = ModelCheckpoint( + dirpath=train_cfg['output_dir'], # Ova putanja dolazi iz 'train' sekcije + filename='best_model', + save_top_k=1, + monitor=train_cfg['early_stopping']['monitor'], + mode=train_cfg['early_stopping']['mode'] + ) + + + trainer = pl.Trainer( + max_epochs=train_cfg['n_epochs'], + log_every_n_steps=10, + callbacks=[checkpoint_callback, early_stop_callback], + logger=config['logger'], + accelerator=train_cfg.get('accelerator'), + devices=train_cfg.get('devices') + ) + + trainer.fit(model, train_loader, val_loader) + + print("\n--- Training done ---") + print(f"Best model: {checkpoint_callback.best_model_path}") + if checkpoint_callback.best_model_score: + print(f"Best value/metric '{train_cfg['early_stopping']['monitor']}': {checkpoint_callback.best_model_score:.4f}") + + print("\n--- Testing on test set ---") + trainer.test(dataloaders=test_loader, 
ckpt_path='best', verbose=True) + + final_metrics = trainer.callback_metrics + print("\n--- Final metrics on the test set ---") + print(final_metrics) + + return trainer, checkpoint_callback.best_model_path, final_metrics \ No newline at end of file diff --git a/gnss/plots.py b/gnss/plots.py new file mode 100644 index 0000000..f28d494 --- /dev/null +++ b/gnss/plots.py @@ -0,0 +1,32 @@ +import matplotlib.pyplot as plt + +def plot_loss_curves(epochs, train_losses, test_losses, save_path=None): + plt.figure(figsize=(8, 4)) + plt.plot(epochs, train_losses, marker='o', label='Train Loss') + plt.plot(epochs, test_losses, marker='o', label='Test Loss') + plt.xlabel('Epoch') + plt.ylabel('SmoothL1 Loss') + plt.title('Training vs. Validation Loss by Epoch') + plt.legend() + plt.grid(True) + if save_path: + plt.savefig(save_path, bbox_inches='tight') + plt.close() + else: + plt.show() + +def plot_mae_multi_curve(epochs, mae_lat, mae_lon, mae_sum, save_path=None): + plt.figure(figsize=(8, 4)) + plt.plot(epochs, mae_lat, marker='o', color='tab:blue', label='Val MAE Lat (cm)') + plt.plot(epochs, mae_lon, marker='o', color='tab:green', label='Val MAE Lon (cm)') + plt.plot(epochs, mae_sum, marker='o', color='tab:orange', label='Val MAE Sum (cm)') + plt.xlabel('Epoch') + plt.ylabel('MAE (cm)') + plt.title('Validation MAE by Epoch (Lat, Lon, Sum)') + plt.legend() + plt.grid(True) + if save_path: + plt.savefig(save_path, bbox_inches='tight') + plt.close() + else: + plt.show() \ No newline at end of file diff --git a/params.yaml b/params.yaml new file mode 100644 index 0000000..fe81f34 --- /dev/null +++ b/params.yaml @@ -0,0 +1,41 @@ +# ================================================================ +# Glavni parametri eksperimenata +# ================================================================ +receiver_name: Ublox10 +jamming_type: cw +jamming_power: '-50' +seed: 42 + +# ================================================================ +# Parametri za prepare_data.py +# 
================================================================ +prepare_data: + train_ratio: 0.7 + val_ratio: 0.15 + + window_size: 10 + stride: 10 + +# ================================================================ +# Parametri za train.py +# ================================================================ +train: + model_name: "FullModel" + n_epochs: 40 + hidden_dim: 128 + + # optimizator + initial_lr: 0.0001 + weight_decay_val: 0.01 + + # Early Stopping + early_stopping: + monitor: val_loss + patience: 5 + mode: 'min' + min_delta: 0.001 + verbose: True + + accelerator: "cuda" + devices: 1 + diff --git a/prepare_data.py b/prepare_data.py new file mode 100644 index 0000000..55b25dd --- /dev/null +++ b/prepare_data.py @@ -0,0 +1,221 @@ +import os +import torch +import yaml +import numpy as np +import random +from pathlib import Path +from gnss import dataset +from gnss.features import normalize_with_scalers +from gnss.config import PARSED_DATA_DIR + +# (1) Load parameters: +with open('params.yaml', 'r') as f: + params = yaml.safe_load(f) + +# Read exp group parameters +receiver_name = params['receiver_name'] +jamming_type = params['jamming_type'] +jamming_power = params['jamming_power'] +seed = params['seed'] +window_size = params['prepare_data']['window_size'] +stride = params['prepare_data']['stride'] +train_ratio = params['prepare_data'].get('train_ratio', 0.7) +val_ratio = params['prepare_data'].get('val_ratio', 0.15) +assert train_ratio + val_ratio < 1.0, "train_ratio + val_ratio must be < 1.0" + +# (2) Set up input/output dirs: + +input_dir = os.path.join(PARSED_DATA_DIR, str(receiver_name), str(jamming_type), str(jamming_power)) +output_dir = params.get("output_dir") +if not output_dir: + raise ValueError("No global 'output_dir' found in params.yaml!") +os.makedirs(output_dir, exist_ok=True) +print(f" INFO - Using input directory: {input_dir}") +print(f" INFO - Saving outputs to: {output_dir}") + + +# (2) Discover measurement files (one per ts): + +def 
get_measurement_definitions(base_path): + measurement_definitions = [] + base_path = Path(base_path) + print(f" DEBUG - Scanning for measurement folders in: {base_path}") + for subdir in sorted(base_path.iterdir()): + if subdir.is_dir() and subdir.name.startswith('R'): + sats_file = subdir / 'sats_data.csv' + rec_file = subdir / 'reciever_data.csv' + if sats_file.exists() and rec_file.exists(): + measurement_definitions.append({ + "id": subdir.name, + "sats": str(sats_file), + "receiver": str(rec_file), + }) + print(f" DEBUG - Found measurement: {subdir.name} | sats: {sats_file} | rec: {rec_file}") + else: + print(f" WARNING - Missing sats or receiver file in {subdir}") + print(f" INFO -Found {len(measurement_definitions)} measurement pairs in {base_path}") + return measurement_definitions + +measurement_defs = get_measurement_definitions(input_dir) +if not measurement_defs: + print(f" WARNING - No valid measurement pairs found in {input_dir}.") + exit(0) + +# (3) Split: + +random.seed(seed) +random.shuffle(measurement_defs) + +n = len(measurement_defs) +n_train = int(train_ratio * n) +n_val = int(val_ratio * n) + +train_defs = measurement_defs[:n_train] +val_defs = measurement_defs[n_train : n_train + n_val] +test_defs = measurement_defs[n_train + n_val :] +print(f"Split: {len(train_defs)} train, {len(val_defs)} val, {len(test_defs)} test time series") +print("Train IDs:", [d['id'] for d in train_defs]) +print("Val IDs:", [d['id'] for d in val_defs]) +print("Test IDs:", [d['id'] for d in test_defs]) + + +######################################################################### + +print("\n--- FAZA 1: Obrada Trening i Validacijskih podataka ---") + +print(" INFO - Učitavanje trening i validacijskih mjerenja...") +train_measurements = dataset.load_all_measurements(train_defs) +val_measurements = dataset.load_all_measurements(val_defs) + +print(" INFO - Fitanje 'train' scalera na trening podacima") +agg_train_rec_feats, agg_train_sat_feats, agg_train_targ_rec = 
dataset.aggregate_for_normalization(train_measurements) +if not all(len(x) > 0 for x in [agg_train_rec_feats, agg_train_sat_feats, agg_train_targ_rec]): + raise ValueError("Nema podataka za fitanje trening scalera.") + +agg_train_rec_feats_2d = np.vstack(agg_train_rec_feats) +agg_train_sat_feats_2d = np.vstack(agg_train_sat_feats) if agg_train_sat_feats else np.empty((0,3)) +agg_train_targ_rec_2d = np.vstack(agg_train_targ_rec) + +rec_scaler_train, sat_scaler_train, targ_scaler_train = dataset.fit_standard_scalers( + agg_train_rec_feats_2d, agg_train_sat_feats_2d, agg_train_targ_rec_2d +) + +print("INFO - Normalizacija trening i validacijskih podataka...") +normalized_train_measurements = normalize_with_scalers(train_measurements, rec_scaler_train, sat_scaler_train, targ_scaler_train) +normalized_val_measurements = normalize_with_scalers(val_measurements, rec_scaler_train, sat_scaler_train, targ_scaler_train) + +train_signals = dataset.create_signals(normalized_train_measurements) +val_signals = dataset.create_signals(normalized_val_measurements) + +#################################################################### + +print("\n--- FAZA 2: Obrada Testnih podataka ---") + +print(" INFO - Učitavanje testnih mjerenja...") +test_measurements = dataset.load_all_measurements(test_defs) + +if test_measurements: + print(" INFO - Fitanje 'test' scalera na testnim podacima...") + agg_test_rec_feats, agg_test_sat_feats, agg_test_targ_rec = dataset.aggregate_for_normalization(test_measurements) + if not all(len(x) > 0 for x in [agg_test_rec_feats, agg_test_sat_feats, agg_test_targ_rec]): + raise ValueError("Nema podataka za fitanje test scalera.") + + agg_test_rec_feats_2d = np.vstack(agg_test_rec_feats) + agg_test_sat_feats_2d = np.vstack(agg_test_sat_feats) if agg_test_sat_feats else np.empty((0,3)) + agg_test_targ_rec_2d = np.vstack(agg_test_targ_rec) + + rec_scaler_test, sat_scaler_test, targ_scaler_test = dataset.fit_standard_scalers( + agg_test_rec_feats_2d, 
agg_test_sat_feats_2d, agg_test_targ_rec_2d + ) + + print(" INFO - Normalizacija testnih podataka...") + normalized_test_measurements = normalize_with_scalers(test_measurements, rec_scaler_test, sat_scaler_test, targ_scaler_test) + test_signals = dataset.create_signals(normalized_test_measurements) +else: + print(" WARNIGN - Nema testnih podataka za obradu. Testni skup će biti prazan.") + test_signals = [] + rec_scaler_test, sat_scaler_test, targ_scaler_test = None, None, None + + +print(f"\nINFO - Spremanje svih podataka i scalera u: {output_dir}") + +torch.save(train_signals, os.path.join(output_dir, 'train_graphs.pt')) +torch.save(val_signals, os.path.join(output_dir, 'val_graphs.pt')) +torch.save(test_signals, os.path.join(output_dir, 'test_graphs.pt')) + +print("INFO - Spremanje 'train' seta scalera...") +torch.save(rec_scaler_train, os.path.join(output_dir, 'rec_scaler_train.pt')) +torch.save(sat_scaler_train, os.path.join(output_dir, 'sat_scaler_train.pt')) +torch.save(targ_scaler_train, os.path.join(output_dir, 'targ_scaler_train.pt')) + +if rec_scaler_test: + print("INFO - Spremanje 'test' seta scalera...") + torch.save(rec_scaler_test, os.path.join(output_dir, 'rec_scaler_test.pt')) + torch.save(sat_scaler_test, os.path.join(output_dir, 'sat_scaler_test.pt')) + torch.save(targ_scaler_test, os.path.join(output_dir, 'targ_scaler_test.pt')) + +print("\nPriprema podataka je završena.") + + +''' +# (4) Load and process train measurements only for scaler fitting +train_measurements = dataset.load_all_measurements(train_defs, window_size) +val_measurements = dataset.load_all_measurements(val_defs, window_size) +test_measurements = dataset.load_all_measurements(test_defs, window_size) + +agg_train_rec_feats, agg_train_sat_feats, agg_train_targ_rec = dataset.aggregate_for_normalization(train_measurements) +if len(agg_train_rec_feats) == 0 or len(agg_train_sat_feats) == 0 or len(agg_train_targ_rec) == 0: + print(f"[WARNING] No valid features found for training in 
{input_dir}. Skipping.") + exit(0) + +try: + # Receiver features: stack to [N, F] + agg_train_rec_feats_2d = np.vstack(agg_train_rec_feats) +except Exception as e: + print(f"WARNING- Could not stack receiver features: {e}. Skipping.") + exit(0) + +# Satellite features: stack all non-empty, check all have isti broj feature-a +non_empty_sat_feats = [fs for fs in agg_train_sat_feats if fs.size > 0] +if non_empty_sat_feats: + n_features = non_empty_sat_feats[0].shape[1] + if not all(fs.shape[1] == n_features for fs in non_empty_sat_feats): + print(f"WARNING - Inconsistent satellite feature dimensions (columns) in {input_dir}. Skipping.") + exit(0) + agg_train_sat_feats_2d = np.vstack(non_empty_sat_feats) +else: + n_features = agg_train_rec_feats_2d.shape[1] if agg_train_rec_feats_2d.size > 0 else 0 + agg_train_sat_feats_2d = np.empty((0, n_features)) + +try: + agg_train_targ_rec_2d = np.vstack(agg_train_targ_rec) +except Exception as e: + print(f"WARNING - Could not stack target features: {e}. 
Skipping.") + exit(0) + +# (5) Fit scalers on train set + +rec_scaler, sat_scaler, targ_scaler = dataset.fit_standard_scalers( + agg_train_rec_feats_2d, agg_train_sat_feats_2d, agg_train_targ_rec_2d) + +# (6) Normalize sve skupove koristeci train scalere +normalized_train_measurements = normalize_with_scalers(train_measurements, rec_scaler, sat_scaler, targ_scaler) +normalized_val_measurements = normalize_with_scalers(val_measurements, rec_scaler, sat_scaler, targ_scaler) +normalized_test_measurements = normalize_with_scalers(test_measurements, rec_scaler, sat_scaler, targ_scaler) + +# (7) Convert to graph objects (signals) +train_signals = dataset.create_signals(normalized_train_measurements) +val_signals = dataset.create_signals(normalized_val_measurements) +test_signals = dataset.create_signals(normalized_test_measurements) + +# (8) Save outputs +print(f"Saving {len(train_signals)} train, {len(val_signals)} val, {len(test_signals)} test graphs to {output_dir}") +torch.save(train_signals, os.path.join(output_dir, 'train_graphs.pt')) +torch.save(val_signals, os.path.join(output_dir, 'val_graphs.pt')) +torch.save(test_signals, os.path.join(output_dir, 'test_graphs.pt')) +torch.save(rec_scaler, os.path.join(output_dir, 'rec_scaler.pt')) +torch.save(sat_scaler, os.path.join(output_dir, 'sat_scaler.pt')) +torch.save(targ_scaler, os.path.join(output_dir, 'targ_scaler.pt')) + +print("Data preparation complete.") +''' \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..3d3a625 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,32 @@ +[build-system] +requires = ["flit_core >=3.2,<4"] +build-backend = "flit_core.buildapi" + +[project] +name = "gnss" +version = "0.0.1" +description = "A short description of the project." 
+authors = [ + { name = "Your name (or your organization/company/team)" }, +] +license = { file = "LICENSE" } +readme = "README.md" +classifiers = [ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: MIT License" +] +requires-python = "~=3.10.0" + + +[tool.ruff] +line-length = 99 +src = ["gnss"] +include = ["pyproject.toml", "gnss/**/*.py"] + +[tool.ruff.lint] +extend-select = ["I"] # Add import sorting + +[tool.ruff.lint.isort] +known-first-party = ["gnss"] +force-sort-within-sections = true + diff --git a/requirements.txt b/requirements.txt index fe01299..ff92702 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,36 +1,41 @@ -# —–– lokalni paket (editable install) --e . - --extra-index-url https://download.pytorch.org/whl/cu124 --find-links https://data.pyg.org/whl/torch-2.4.0+cu124.html -# —–– cookiecutter default external requirements: -click -Sphinx -coverage -awscli -flake8 -python-dotenv>=0.5.1 - -# —–– Data science & ML libraries: -numpy -scipy -pandas +ipython +jupyterlab +loguru matplotlib -scikit-learn +mkdocs +notebook +#numpy==1.24.4 +#scipy +#pandas==1.5.3 +#pip +pytest +python-dotenv +#scikit-learn +tqdm +typer +-e . 
-# —–– PyTorch + CUDA build: +# PyTorch core torch==2.4.0+cu124 torchvision==0.19.0+cu124 torchaudio==2.4.0+cu124 # —–– PyG low-level CUDA kerneli: -pyg_lib -torch_scatter==2.1.2 -torch_sparse==0.6.17 -torch_cluster==1.6.1 -torch_spline_conv==1.2.2 +pyg_lib==0.4.0+pt24cu124 +torch_scatter==2.1.2+pt24cu124 +torch_sparse==0.6.18+pt24cu124 +torch_cluster==1.6.3+pt24cu124 +torch_spline_conv==1.2.2+pt24cu124 # —–– PyG high-level: torch-geometric==2.3.1 -torch-geometric-temporal==0.54.0 +torch-geometric-temporal==0.56.0 +pytorch-lightning==2.2.4 +# DVC for experiment tracking +dvc +dvclive +# YAML parsing +pyyaml \ No newline at end of file diff --git a/run_all_experiments.py b/run_all_experiments.py new file mode 100644 index 0000000..8217772 --- /dev/null +++ b/run_all_experiments.py @@ -0,0 +1,140 @@ +import argparse +import subprocess +import sys +from pathlib import Path +import yaml +import itertools + +DATA_ROOT = Path("~/shared/Ivana_GNN/Sateliti/GNSSGraphDetect/data/parsed").expanduser() +PARAMS_FILE = Path("params.yaml") +EXCLUDED_JAMMING_TYPES = {"none", ".ipynb_checkpoints"} +EXCLUDED_RECEIVERS = {"Ublox6", ".ipynb_checkpoints"} + +def _split(comma_separated: str | None) -> set | None: + return set(comma_separated.split(",")) if comma_separated else None + +def parse_args() -> argparse.Namespace: + p = argparse.ArgumentParser(description="Run GNSS-Graph-Detect experiments.", allow_abbrev=False) + p.add_argument("-r", "--receivers", help="Comma-separated list of receivers to INCLUDE") + p.add_argument("-j", "--jams", help="Comma-separated list of jamming types to INCLUDE") + p.add_argument("-p", "--powers", help="Comma-separated list of jamming power folders to INCLUDE") + p.add_argument("--seeds", help="Comma-separated list of seeds (default: 42,145,156,5,85)") + p.add_argument("--dry-run", action="store_true", help="Print planned experiments and exit") + return p.parse_args() + +def discover_experiments(root: Path, want_receivers: set, want_jams: set, 
want_powers: set) -> list[dict]: + ''' + Prolazi kroz direktorije i pronalazi sve validne kombinacije + receiver/jamming_type/power. + ''' + experiments = [] + for receiver_dir in root.iterdir(): + if not receiver_dir.is_dir() or receiver_dir.name in EXCLUDED_RECEIVERS: + continue + if want_receivers and receiver_dir.name not in want_receivers: + continue + + for jam_dir in receiver_dir.iterdir(): + if not jam_dir.is_dir() or jam_dir.name in EXCLUDED_JAMMING_TYPES: + continue + if want_jams and jam_dir.name not in want_jams: + continue + + for power_dir in jam_dir.iterdir(): + if not power_dir.is_dir(): + continue + if want_powers and power_dir.name not in want_powers: + continue + + experiments.append({ + "receiver": receiver_dir.name, + "jamming_type": jam_dir.name, + "power": power_dir.name + }) + return experiments + +def run_single_experiment(exp_params: dict, seed: int, dry_run: bool) -> bool: + ''' + Ažurira params.yaml i pokreće 'dvc repro' za jedan eksperiment + Vraća True ako je uspješno, False ako nije. 
+ ''' + exp_name = f"exp_{exp_params['receiver']}_{exp_params['jamming_type']}_{exp_params['power']}_seed{seed}".replace("-", "m") + print(f"> Running: {exp_name}") + + if dry_run: + return True + + # Update params.yaml + try: + with open(PARAMS_FILE, "r") as f: + params = yaml.safe_load(f) + except FileNotFoundError: + print(f"ERROR: {PARAMS_FILE} not found!", file=sys.stderr) + return False + + params["receiver_name"] = exp_params["receiver"] + params["jamming_type"] = exp_params["jamming_type"] + params["jamming_power"] = exp_params["power"] + params["seed"] = seed + + if "train" in params: + params["train"]["output_dir"] = f"reports/{exp_params['receiver']}/{exp_params['jamming_type']}/{exp_params['power']}/seed_{seed}" + params["output_dir"] = f"data/processed/{exp_params['receiver']}/{exp_params['jamming_type']}/{exp_params['power']}/seed_{seed}" + + with open(PARAMS_FILE, "w") as f: + yaml.dump(params, f) + + # Run DVC pipeline : + res = subprocess.run(["dvc", "repro"], check=False) + #res = subprocess.run(["dvc", "repro"], check=False, capture_output=True, text=True) + if res.returncode != 0: + print(f"! 
Experiment {exp_name} FAILED", file=sys.stderr) + print("--- DVC STDOUT ---", file=sys.stderr) + print(res.stdout, file=sys.stderr) + print("--- DVC STDERR ---", file=sys.stderr) + print(res.stderr, file=sys.stderr) + return False + + return True + +def main() -> None: + ns = parse_args() + want_receivers = _split(ns.receivers) + want_jams = _split(ns.jams) + want_powers = _split(ns.powers) + seeds = [int(s.strip()) for s in ns.seeds.split(",")] if ns.seeds else [42, 145, 156, 5, 85] + + print("--- Discovering experiments ---") + experiments_to_run = discover_experiments(DATA_ROOT, want_receivers, want_jams, want_powers) + + if not experiments_to_run: + print("No experiment combinations found for the selected filters.") + return + + # Spajamo eksperimente i seedove u jednu listu + all_combinations = list(itertools.product(experiments_to_run, seeds)) + print(f"Found {len(experiments_to_run)} experiment configurations, running for {len(seeds)} seeds.") + print(f"Total experiments to run: {len(all_combinations)}") + + all_failures = [] + for exp_params, seed in all_combinations: + success = run_single_experiment(exp_params, seed, ns.dry_run) + if not success: + exp_name = f"{exp_params['receiver']}_{exp_params['jamming_type']}_{exp_params['power']}_seed{seed}" + all_failures.append(exp_name) + + # Ispis rezultata + if ns.dry_run: + print("\n(Dry-run complete - no experiments executed.)") + return + + if all_failures: + print("\n--- Some experiments failed: ---") + for name in all_failures: + print(f" - {name}") + else: + print("\n--- All selected experiments completed successfully!---") + +if __name__ == "__main__": + main() + \ No newline at end of file diff --git a/run_experiment.py b/run_experiment.py new file mode 100644 index 0000000..c6c4092 --- /dev/null +++ b/run_experiment.py @@ -0,0 +1,88 @@ +import numpy as np +import pandas as pd +import yaml +import os +import random +import torch +from pathlib import Path +import pickle +from gnss.modeling.train import 
train_lightning_model +from dvclive.lightning import DVCLiveLogger +from gnss.config import PARSED_DATA_DIR +#torch.set_float32_matmul_precision('high') + + +def main(): + with open('params.yaml', 'r') as f: + params = yaml.safe_load(f) + + seed = params['seed'] + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + + output_dir = Path(params['train']['output_dir']) + data_out_dir = Path(params['output_dir']) + + print(f"Loading data from: {data_out_dir}") + train_signals = torch.load(data_out_dir / 'train_graphs.pt') + val_signals = torch.load(data_out_dir / 'val_graphs.pt') + test_signals = torch.load(data_out_dir / 'test_graphs.pt') + + print("Učitavanje dva seta scalera...") + scalers_for_train_val = { + 'rec': torch.load(data_out_dir / 'rec_scaler_train.pt'), + 'sat': torch.load(data_out_dir / 'sat_scaler_train.pt'), + 'targ': torch.load(data_out_dir / 'targ_scaler_train.pt') + } + scalers_for_test = { + 'rec': torch.load(data_out_dir / 'rec_scaler_test.pt'), + 'sat': torch.load(data_out_dir / 'sat_scaler_test.pt'), + 'targ': torch.load(data_out_dir / 'targ_scaler_test.pt') + } + + metadata = train_signals[0][0].metadata() if train_signals and train_signals[0] else None + + logger = DVCLiveLogger( + dir=str(output_dir / "dvclive"), + save_dvc_exp=False, + dvcyaml=False + ) + + + config = { + **params, + 'output_dir': str(output_dir), + 'logger': logger + } + + trainer, best_model_path, best_metrics = train_lightning_model( + train_signals, + val_signals, + test_signals, + scalers_for_train_val, + scalers_for_test, + metadata, + config + ) + + if hasattr(logger, "finalize"): + logger.finalize("success") + + if best_metrics: + print("\n--- Final metrics on test set ---") + metrics_to_save = {k: v.item() if isinstance(v, torch.Tensor) else v for k, v in best_metrics.items()} + print(yaml.dump(metrics_to_save, indent=2)) + + with open(output_dir / 'metrics.yaml', 'w') as f: + 
yaml.dump(metrics_to_save, f) + + if best_model_path: + print(f"\nBest model saved on: {best_model_path}") + + print(f"\nLogs saved on: {output_dir}") + +if __name__ == '__main__': + main() diff --git a/setup.py b/setup.py deleted file mode 100644 index dc3d4c9..0000000 --- a/setup.py +++ /dev/null @@ -1,10 +0,0 @@ -from setuptools import find_packages, setup - -setup( - name='src', - packages=find_packages(), - version='0.1.0', - description='A short description of the project.', - author='Your name (or your organization/company/team)', - license='MIT', -) diff --git a/src/data/__init__.py b/src/data/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/data/make_dataset.py b/src/data/make_dataset.py deleted file mode 100644 index 96b377a..0000000 --- a/src/data/make_dataset.py +++ /dev/null @@ -1,30 +0,0 @@ -# -*- coding: utf-8 -*- -import click -import logging -from pathlib import Path -from dotenv import find_dotenv, load_dotenv - - -@click.command() -@click.argument('input_filepath', type=click.Path(exists=True)) -@click.argument('output_filepath', type=click.Path()) -def main(input_filepath, output_filepath): - """ Runs data processing scripts to turn raw data from (../raw) into - cleaned data ready to be analyzed (saved in ../processed). 
- """ - logger = logging.getLogger(__name__) - logger.info('making final data set from raw data') - - -if __name__ == '__main__': - log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' - logging.basicConfig(level=logging.INFO, format=log_fmt) - - # not used in this stub but often useful for finding various files - project_dir = Path(__file__).resolve().parents[2] - - # find .env automagically by walking up directories until it's found, then - # load up the .env entries as environment variables - load_dotenv(find_dotenv()) - - main() diff --git a/src/features/.gitkeep b/src/features/.gitkeep deleted file mode 100644 index e69de29..0000000 diff --git a/src/features/__init__.py b/src/features/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/features/build_features.py b/src/features/build_features.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/models/.gitkeep b/src/models/.gitkeep deleted file mode 100644 index e69de29..0000000 diff --git a/src/models/__init__.py b/src/models/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/models/predict_model.py b/src/models/predict_model.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/models/train_model.py b/src/models/train_model.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/visualization/.gitkeep b/src/visualization/.gitkeep deleted file mode 100644 index e69de29..0000000 diff --git a/src/visualization/__init__.py b/src/visualization/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/visualization/visualize.py b/src/visualization/visualize.py deleted file mode 100644 index e69de29..0000000 diff --git a/test_environment.py b/test_environment.py deleted file mode 100644 index d0ac4a7..0000000 --- a/test_environment.py +++ /dev/null @@ -1,25 +0,0 @@ -import sys - -REQUIRED_PYTHON = "python3" - - -def main(): - system_major = sys.version_info.major - if REQUIRED_PYTHON == 
"python": - required_major = 2 - elif REQUIRED_PYTHON == "python3": - required_major = 3 - else: - raise ValueError("Unrecognized python interpreter: {}".format( - REQUIRED_PYTHON)) - - if system_major != required_major: - raise TypeError( - "This project requires Python {}. Found: Python {}".format( - required_major, sys.version)) - else: - print(">>> Development environment passes all tests!") - - -if __name__ == '__main__': - main() diff --git a/tox.ini b/tox.ini deleted file mode 100644 index c32fbd8..0000000 --- a/tox.ini +++ /dev/null @@ -1,3 +0,0 @@ -[flake8] -max-line-length = 79 -max-complexity = 10 From 000e30824f99c29e70b3059a2263b7612db1e995 Mon Sep 17 00:00:00 2001 From: IkaKes Date: Wed, 18 Mar 2026 16:26:37 +0000 Subject: [PATCH 04/11] Remove unused files and update core GNSS experiment pipeline --- LICENSE | 10 -- Makefile | 86 ---------- docs/.gitkeep | 0 docs/README.md | 12 -- docs/docs/getting-started.md | 6 - docs/docs/index.md | 10 -- docs/mkdocs.yml | 4 - dvc.yaml | 1 - gnss/__init__.py | 1 - gnss/config.py | 14 +- gnss/dataset.py | 192 ++++++++++----------- gnss/features.py | 36 ---- gnss/model.py | 97 ++++++----- gnss/plots.py | 32 ---- gnss/{modeling => train}/__init__.py | 0 gnss/{modeling => train}/train.py | 124 +++++++------- models/.gitkeep | 0 params.yaml | 46 ++--- prepare_data.py | 241 +++++++++------------------ pyproject.toml | 32 ---- references/.gitkeep | 0 reports/.gitkeep | 0 reports/figures/.gitkeep | 0 run_all_experiments.py | 168 +++++++++++-------- run_experiment.py | 78 +++++---- 25 files changed, 439 insertions(+), 751 deletions(-) delete mode 100644 LICENSE delete mode 100644 Makefile delete mode 100644 docs/.gitkeep delete mode 100644 docs/README.md delete mode 100644 docs/docs/getting-started.md delete mode 100644 docs/docs/index.md delete mode 100644 docs/mkdocs.yml delete mode 100644 gnss/features.py delete mode 100644 gnss/plots.py rename gnss/{modeling => train}/__init__.py (100%) rename gnss/{modeling => 
train}/train.py (63%) delete mode 100644 models/.gitkeep delete mode 100644 pyproject.toml delete mode 100644 references/.gitkeep delete mode 100644 reports/.gitkeep delete mode 100644 reports/figures/.gitkeep diff --git a/LICENSE b/LICENSE deleted file mode 100644 index b47709e..0000000 --- a/LICENSE +++ /dev/null @@ -1,10 +0,0 @@ - -The MIT License (MIT) -Copyright (c) 2025, Your name (or your organization/company/team) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- diff --git a/Makefile b/Makefile deleted file mode 100644 index 59e6586..0000000 --- a/Makefile +++ /dev/null @@ -1,86 +0,0 @@ -################################################################################# -# GLOBALS # -################################################################################# - -PROJECT_NAME = gnss -PYTHON_VERSION = 3.10 -PYTHON_INTERPRETER = python - -################################################################################# -# COMMANDS # -################################################################################# - - -## Install Python dependencies -.PHONY: requirements -requirements: - $(PYTHON_INTERPRETER) -m pip install -U pip - $(PYTHON_INTERPRETER) -m pip install -r requirements.txt - - - - -## Delete all compiled Python files -.PHONY: clean -clean: - find . -type f -name "*.py[co]" -delete - find . -type d -name "__pycache__" -delete - - -## Lint using ruff (use `make format` to do formatting) -.PHONY: lint -lint: - ruff format --check - ruff check - -## Format source code with ruff -.PHONY: format -format: - ruff check --fix - ruff format - - - -## Run tests -.PHONY: test -test: - python -m pytest tests - - -## Set up Python interpreter environment -.PHONY: create_environment -create_environment: - @bash -c "if [ ! -z `which virtualenvwrapper.sh` ]; then source `which virtualenvwrapper.sh`; mkvirtualenv $(PROJECT_NAME) --python=$(PYTHON_INTERPRETER); else mkvirtualenv.bat $(PROJECT_NAME) --python=$(PYTHON_INTERPRETER); fi" - @echo ">>> New virtualenv created. 
Activate with:\nworkon $(PROJECT_NAME)" - - - - -################################################################################# -# PROJECT RULES # -################################################################################# - - -## Make dataset -.PHONY: data -data: requirements - $(PYTHON_INTERPRETER) gnss/dataset.py - - -################################################################################# -# Self Documenting Commands # -################################################################################# - -.DEFAULT_GOAL := help - -define PRINT_HELP_PYSCRIPT -import re, sys; \ -lines = '\n'.join([line for line in sys.stdin]); \ -matches = re.findall(r'\n## (.*)\n[\s\S]+?\n([a-zA-Z_-]+):', lines); \ -print('Available rules:\n'); \ -print('\n'.join(['{:25}{}'.format(*reversed(match)) for match in matches])) -endef -export PRINT_HELP_PYSCRIPT - -help: - @$(PYTHON_INTERPRETER) -c "${PRINT_HELP_PYSCRIPT}" < $(MAKEFILE_LIST) diff --git a/docs/.gitkeep b/docs/.gitkeep deleted file mode 100644 index e69de29..0000000 diff --git a/docs/README.md b/docs/README.md deleted file mode 100644 index 79c1468..0000000 --- a/docs/README.md +++ /dev/null @@ -1,12 +0,0 @@ -Generating the docs ----------- - -Use [mkdocs](http://www.mkdocs.org/) structure to update the documentation. - -Build locally with: - - mkdocs build - -Serve locally with: - - mkdocs serve diff --git a/docs/docs/getting-started.md b/docs/docs/getting-started.md deleted file mode 100644 index b4f71c3..0000000 --- a/docs/docs/getting-started.md +++ /dev/null @@ -1,6 +0,0 @@ -Getting started -=============== - -This is where you describe how to get set up on a clean install, including the -commands necessary to get the raw data (using the `sync_data_from_s3` command, -for example), and then how to make the cleaned, final data sets. 
diff --git a/docs/docs/index.md b/docs/docs/index.md deleted file mode 100644 index 6ea5743..0000000 --- a/docs/docs/index.md +++ /dev/null @@ -1,10 +0,0 @@ -# GNSS documentation! - -## Description - -A short description of the project. - -## Commands - -The Makefile contains the central entry points for common tasks related to this project. - diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml deleted file mode 100644 index 8076416..0000000 --- a/docs/mkdocs.yml +++ /dev/null @@ -1,4 +0,0 @@ -site_name: GNSS -# -site_author: Your name (or your organization/company/team) -# \ No newline at end of file diff --git a/dvc.yaml b/dvc.yaml index 436e106..a409ede 100644 --- a/dvc.yaml +++ b/dvc.yaml @@ -15,7 +15,6 @@ stages: - params.yaml - ${output_dir} outs: - #- ${train.output_dir}/best_full_model_lightning.pth - ${train.output_dir}/best_model.ckpt - ${train.output_dir}/dvclive/metrics.json: cache: false diff --git a/gnss/__init__.py b/gnss/__init__.py index 44a1789..e69de29 100644 --- a/gnss/__init__.py +++ b/gnss/__init__.py @@ -1 +0,0 @@ -from gnss import config # noqa: F401 diff --git a/gnss/config.py b/gnss/config.py index 1abed2a..5a687a0 100644 --- a/gnss/config.py +++ b/gnss/config.py @@ -1,28 +1,24 @@ from pathlib import Path -from dotenv import load_dotenv from loguru import logger -import random -import numpy as np -import torch -# Paths PROJ_ROOT = Path(__file__).resolve().parents[1] logger.info(f"PROJ_ROOT path is: {PROJ_ROOT}") DATA_DIR = PROJ_ROOT / "data" -PARSED_DATA_DIR = DATA_DIR / "parsed" RAW_DATA_DIR = DATA_DIR / "raw" +PARSED_DATA_DIR = DATA_DIR / "parsed" PROCESSED_DATA_DIR = DATA_DIR / "processed" + +MIXED_DATA_DIR = DATA_DIR / "mixed" +RANDOM_MIXED_DATA_DIR = DATA_DIR / "mixed_random" + MODELS_DIR = PROJ_ROOT / "models" REPORTS_DIR = PROJ_ROOT / "reports" FIGURES_DIR = REPORTS_DIR / "figures" -# If tqdm is installed, configure loguru with tqdm.write try: from tqdm import tqdm logger.remove(0) logger.add(lambda msg: tqdm.write(msg, end=""), 
colorize=True) except ModuleNotFoundError: pass - -window_size = 10 \ No newline at end of file diff --git a/gnss/dataset.py b/gnss/dataset.py index ee6baa6..fea68ae 100644 --- a/gnss/dataset.py +++ b/gnss/dataset.py @@ -1,148 +1,154 @@ -import os import numpy as np import pandas as pd from torch.utils.data import Dataset, DataLoader, ConcatDataset from sklearn.preprocessing import StandardScaler from torch_geometric_temporal.signal import DynamicHeteroGraphTemporalSignal -from .config import window_size - -# 1) Single measurement loader: def load_and_process_single_measurement(sats_csv_path, receiver_csv_path): - - sats_df_meas = pd.read_csv(sats_csv_path) + sats_df_meas = pd.read_csv(sats_csv_path) receiver_df_meas = pd.read_csv(receiver_csv_path) time_steps_meas = sorted(receiver_df_meas['T_ID'].unique()) - - feature_dicts_meas = [] - target_dicts_meas = [] + + feature_dicts_meas = [] + target_dicts_meas = [] edge_index_dicts_meas = [] additional_sids_dicts = [] - + for t_local in time_steps_meas: - rec = receiver_df_meas[receiver_df_meas['T_ID'] == t_local].iloc[0] + rec = receiver_df_meas[receiver_df_meas['T_ID'] == t_local].iloc[0] feat_rec = rec[['Lat', 'Lon']].to_numpy().reshape(1, 2) targ_rec = rec[['LatDev', 'LonDev']].to_numpy().reshape(1, 2) - - sats_t = sats_df_meas[sats_df_meas['T_ID'] == t_local].sort_values('S_ID') - feat_sat = sats_t[['SNR', 'az', 'el']].to_numpy() - s_ids_sat = sats_t['S_ID'].values.astype(np.int64) - n_sat = feat_sat.shape[0] - -# if n_sat > 0 and n_sat != len(s_ids_sat): -# s_ids_sat = s_ids_sat[:n_sat] -# elif n_sat == 0 and len(s_ids_sat) > 0: -# s_ids_sat = np.array([], dtype=np.int64) - - src = np.zeros(n_sat, dtype=int) - dst = np.arange(n_sat, dtype=int) - edges = np.vstack([src, dst]) + + sats_t = sats_df_meas[sats_df_meas['T_ID'] == t_local].sort_values('S_ID') + feat_sat = sats_t[['SNR', 'az', 'el']].to_numpy() + s_ids_sat = sats_t['S_ID'].values.astype(np.int64) + n_sat = feat_sat.shape[0] + + src = np.zeros(n_sat, 
dtype=int) + dst = np.arange(n_sat, dtype=int) + edges = np.vstack([src, dst]) if n_sat > 0 else np.empty((2, 0), dtype=int) edges_rev = edges[::-1].copy() - - feature_dicts_meas.append({ - 'receiver': feat_rec, - 'satellite': feat_sat - }) - target_dicts_meas.append({ - 'receiver': targ_rec - }) + + feature_dicts_meas.append({'receiver': feat_rec, 'satellite': feat_sat}) + target_dicts_meas.append({'receiver': targ_rec}) edge_index_dicts_meas.append({ - ('receiver', 'to', 'satellite'): edges, + ('receiver', 'to', 'satellite'): edges, ('satellite', 'rev_to', 'receiver'): edges_rev }) - additional_sids_dicts.append({ - 'satellite_s_ids': s_ids_sat - }) - - edge_weight_dicts_meas = [None] * len(time_steps_meas) + additional_sids_dicts.append({'satellite_s_ids': s_ids_sat}) + return ( feature_dicts_meas, target_dicts_meas, edge_index_dicts_meas, - edge_weight_dicts_meas, + [None] * len(time_steps_meas), time_steps_meas, additional_sids_dicts ) -# 2) Load and preprocess measurements: def load_all_measurements(measurement_files): all_measurements_processed = [] for m_info in measurement_files: - features, targets, edges, weights, times, sids_per_ts = ( - load_and_process_single_measurement( - m_info["sats"], - m_info["receiver"] - ) + features, targets, edges, weights, times, sids = load_and_process_single_measurement( + m_info["sats"], m_info["receiver"] ) - all_measurements_processed.append({ - "id": m_info["id"], - "features": features, - "targets": targets, - "edges": edges, - "weights": weights, - "time_steps": times, - "satellite_s_ids": sids_per_ts + "id": m_info["id"], + "features": features, + "targets": targets, + "edges": edges, + "weights": weights, + "time_steps": times, + "satellite_s_ids": sids }) - return all_measurements_processed - -# 3) Aggregation for normalization: - -def aggregate_for_normalization(train_measurements_data): - agg_train_rec_feats = [] - agg_train_sat_feats = [] - agg_train_targ_rec = [] - for meas_data in train_measurements_data: - 
num_ts = len(meas_data["features"]) - for i in range(num_ts): - fr = meas_data["features"][i]['receiver'] - agg_train_rec_feats.append(fr) - fs = meas_data["features"][i]['satellite'] + + +def aggregate_for_normalization(measurements_data): + agg_rec, agg_sat, agg_targ = [], [], [] + for meas in measurements_data: + for i in range(len(meas["features"])): + agg_rec.append(meas["features"][i]['receiver']) + agg_targ.append(meas["targets"][i]['receiver']) + fs = meas["features"][i]['satellite'] if fs.size > 0: - agg_train_sat_feats.append(fs) - tr = meas_data["targets"][i]['receiver'] - agg_train_targ_rec.append(tr) - return agg_train_rec_feats, agg_train_sat_feats, agg_train_targ_rec + agg_sat.append(fs) + + return ( + np.vstack(agg_rec) if agg_rec else np.empty((0, 2)), + np.vstack(agg_sat) if agg_sat else np.empty((0, 3)), + np.vstack(agg_targ) if agg_targ else np.empty((0, 2)) + ) + + +def fit_standard_scalers(rec_np, sat_np, targ_np): + return ( + StandardScaler().fit(rec_np), + StandardScaler().fit(sat_np), + StandardScaler().fit(targ_np) + ) -# 4) Fit StandardScalers: -def fit_standard_scalers(rec_feats_np, sat_feats_np, targ_rec_np): - rec_scaler = StandardScaler().fit(rec_feats_np) - targ_scaler = StandardScaler().fit(targ_rec_np) - sat_scaler = StandardScaler().fit(sat_feats_np) - return rec_scaler, sat_scaler, targ_scaler +def normalize_with_scalers(measurement_data_list, rec_scaler, sat_scaler, targ_scaler): + normalized_measurements = [] + for meas_data in measurement_data_list: + norm_feat_dicts = [] + norm_targ_dicts = [] + norm_sids_list = [] + + for i in range(len(meas_data["features"])): + fr = meas_data["features"][i]['receiver'] + fs = meas_data["features"][i]['satellite'] + sids = meas_data["satellite_s_ids"][i]['satellite_s_ids'] + tr = meas_data["targets"][i]['receiver'] + + norm_fr = rec_scaler.transform(fr) + norm_tr = targ_scaler.transform(tr) + norm_fs = sat_scaler.transform(fs) if fs.size > 0 else fs.copy() + + 
norm_feat_dicts.append({'receiver': norm_fr, 'satellite': norm_fs}) + norm_targ_dicts.append({'receiver': norm_tr}) + norm_sids_list.append({'satellite_s_ids': sids.copy()}) + + normalized_measurements.append({ + **meas_data, + "features": norm_feat_dicts, + "targets": norm_targ_dicts, + "satellite_s_ids": norm_sids_list + }) + + return normalized_measurements -# 5) create_signals: def create_signals(measurements): signals = [] - for meas_data in measurements: - signal = DynamicHeteroGraphTemporalSignal( - edge_index_dicts = meas_data["edges"], - edge_weight_dicts = meas_data["weights"], - feature_dicts = meas_data["features"], - target_dicts = meas_data["targets"], - satellite_s_ids = meas_data["satellite_s_ids"] + for m in measurements: + sig = DynamicHeteroGraphTemporalSignal( + edge_index_dicts=m["edges"], + edge_weight_dicts=m["weights"], + feature_dicts=m["features"], + target_dicts=m["targets"], + **{"satellite_s_ids": m["satellite_s_ids"]} ) - signals.append(signal) + signals.append(sig) return signals -# 6) SlidingWindowDataset & build_loader: class SlidingWindowDataset(Dataset): def __init__(self, signal, window_size, stride=1): self.signal = signal self.window_size = window_size self.stride = stride + def __len__(self): return max(0, (self.signal.snapshot_count - self.window_size) // self.stride + 1) + def __getitem__(self, idx): start = idx * self.stride - end = start + self.window_size - return [self.signal[t] for t in range(start, end)] + return [self.signal[t] for t in range(start, start + self.window_size)] + def build_loader(signals, window_size, shuffle, stride=1): datasets = [] @@ -152,11 +158,9 @@ def build_loader(signals, window_size, shuffle, stride=1): datasets.append(ds) if not datasets: return None - concat = ConcatDataset(datasets) return DataLoader( - concat, + ConcatDataset(datasets), batch_size=1, shuffle=shuffle, collate_fn=lambda batch: batch[0] - ) - + ) \ No newline at end of file diff --git a/gnss/features.py b/gnss/features.py 
deleted file mode 100644 index 4430045..0000000 --- a/gnss/features.py +++ /dev/null @@ -1,36 +0,0 @@ -import numpy as np - -def normalize_with_scalers(measurement_data_list, rec_scaler, sat_scaler, targ_scaler): - normalized_measurements = [] - for meas_data in measurement_data_list: - norm_feat_dicts = [] - norm_targ_dicts = [] - norm_sids_list = [] - num_ts = len(meas_data["features"]) - for i in range(num_ts): - fr = meas_data["features"][i]['receiver'] - fs = meas_data["features"][i]['satellite'] - sids = meas_data["satellite_s_ids"][i]['satellite_s_ids'] - tr = meas_data["targets"][i]['receiver'] - norm_fr = rec_scaler.transform(fr) - norm_tr = targ_scaler.transform(tr) - if fs.size > 0: - norm_fs = sat_scaler.transform(fs) - else: - norm_fs = fs.copy() - norm_feat_dicts.append({ - 'receiver': norm_fr, - 'satellite': norm_fs - }) - norm_targ_dicts.append({ - 'receiver': norm_tr - }) - norm_sids_list.append({'satellite_s_ids': sids.copy()}) - new_meas = { - **meas_data, - "features": norm_feat_dicts, - "targets": norm_targ_dicts, - "satellite_s_ids": norm_sids_list - } - normalized_measurements.append(new_meas) - return normalized_measurements diff --git a/gnss/model.py b/gnss/model.py index 92cc444..ba4b0f3 100644 --- a/gnss/model.py +++ b/gnss/model.py @@ -1,70 +1,69 @@ import torch import torch.nn as nn -import numpy as np from torch_geometric_temporal.nn.hetero import HeteroGCLSTM -class FullModel(nn.Module): - - def __init__(self, in_channels_dict, hidden_dim, metadata, dropout_rate=0.1): +class JaGuard(nn.Module): + def __init__(self, in_channels_dict, hidden_dim, metadata, num_total_sats, dropout_rate=0.1): super().__init__() - + + self.hidden_dim = hidden_dim + self.num_total_sats = num_total_sats + self.gclstm = HeteroGCLSTM( in_channels_dict=in_channels_dict, out_channels=hidden_dim, metadata=metadata ) - self.dropout = nn.Dropout(dropout_rate) - self.linear_out = nn.Linear(hidden_dim, 2) def forward(self, window_snapshots, device): - - hidden_dim = 
self.linear_out.in_features - + # Initialize per-window memory for all satellites + h_sat_memory = torch.zeros(self.num_total_sats, self.hidden_dim, device=device) + c_sat_memory = torch.zeros(self.num_total_sats, self.hidden_dim, device=device) + + h_rec = torch.zeros(1, self.hidden_dim, device=device) + c_rec = torch.zeros(1, self.hidden_dim, device=device) + + # Iterate through all snapshots in the window + for snapshot in window_snapshots: + snapshot = snapshot.to(device) + x_dict = snapshot.x_dict + eidx = snapshot.edge_index_dict + + # satellite_s_ids is stored under double-key in PyG NodeStorage + s_ids_raw = snapshot['satellite_s_ids']['satellite_s_ids'] + if not isinstance(s_ids_raw, torch.Tensor): + s_ids_raw = torch.tensor(s_ids_raw, dtype=torch.long) + s_ids = s_ids_raw.to(device).reshape(-1) + + # READ: select only currently visible satellites from memory bank + if s_ids.numel() > 0: + h_sat_active = torch.index_select(h_sat_memory, 0, s_ids) + c_sat_active = torch.index_select(c_sat_memory, 0, s_ids) + else: + # No satellites visible — active state is empty + h_sat_active = torch.empty((0, self.hidden_dim), device=device) + c_sat_active = torch.empty((0, self.hidden_dim), device=device) - h_state = {'receiver': torch.zeros(hidden_dim, device=device)} - c_state = {'receiver': torch.zeros(hidden_dim, device=device)} + h_dict_in = {'receiver': h_rec, 'satellite': h_sat_active} + c_dict_in = {'receiver': c_rec, 'satellite': c_sat_active} - for snapshot in window_snapshots[:-1]: - x_dict_for_gclstm = snapshot.x_dict - eidx_on_device = snapshot.edge_index_dict - - s_ids_val = snapshot['satellite_s_ids']['satellite_s_ids'] - + # LSTM STEP + h_out, c_out = self.gclstm(x_dict, eidx, h_dict_in, c_dict_in) - num_sat = s_ids_val.shape[0] - - h_sat = torch.zeros((num_sat, hidden_dim), device=device) - c_sat = torch.zeros((num_sat, hidden_dim), device=device) + # Update receiver state + h_rec = h_out['receiver'] + c_rec = c_out['receiver'] - rec_h = 
h_state['receiver'].unsqueeze(0) - rec_c = c_state['receiver'].unsqueeze(0) + # updated states back to memory bank + if s_ids.numel() > 0: + h_sat_memory = h_sat_memory.index_put((s_ids,), h_out['satellite']) + c_sat_memory = c_sat_memory.index_put((s_ids,), c_out['satellite']) - for j, sid_tensor in enumerate(s_ids_val): - sid_key = sid_tensor.item() - if sid_key in h_state: - h_sat[j] = h_state[sid_key] - c_sat[j] = c_state[sid_key] - - h_dict_step = {'receiver': rec_h, 'satellite': h_sat} - c_dict_step = {'receiver': rec_c, 'satellite': c_sat} - - h_out, c_out = self.gclstm(x_dict_for_gclstm, eidx_on_device, h_dict_step, c_dict_step) - - h_state['receiver'] = h_out['receiver'][0] - c_state['receiver'] = c_out['receiver'][0] - - for j, sid_tensor in enumerate(s_ids_val): - sid_key = sid_tensor.item() - h_state[sid_key] = h_out['satellite'][j] - c_state[sid_key] = c_out['satellite'][j] - - h_final = h_state['receiver'].unsqueeze(0) - h_dropped = self.dropout(h_final) - pred_norm = self.linear_out(h_dropped) - true_norm = window_snapshots[-1].y_dict['receiver'] - - return pred_norm, true_norm + # Predict from final receiver hidden state + pred = self.linear_out(self.dropout(h_rec)) + true = window_snapshots[-1].y_dict['receiver'] + return pred, true \ No newline at end of file diff --git a/gnss/plots.py b/gnss/plots.py deleted file mode 100644 index f28d494..0000000 --- a/gnss/plots.py +++ /dev/null @@ -1,32 +0,0 @@ -import matplotlib.pyplot as plt - -def plot_loss_curves(epochs, train_losses, test_losses, save_path=None): - plt.figure(figsize=(8, 4)) - plt.plot(epochs, train_losses, marker='o', label='Train Loss') - plt.plot(epochs, test_losses, marker='o', label='Test Loss') - plt.xlabel('Epoch') - plt.ylabel('SmoothL1 Loss') - plt.title('Training vs. 
Validation Loss by Epoch') - plt.legend() - plt.grid(True) - if save_path: - plt.savefig(save_path, bbox_inches='tight') - plt.close() - else: - plt.show() - -def plot_mae_multi_curve(epochs, mae_lat, mae_lon, mae_sum, save_path=None): - plt.figure(figsize=(8, 4)) - plt.plot(epochs, mae_lat, marker='o', color='tab:blue', label='Val MAE Lat (cm)') - plt.plot(epochs, mae_lon, marker='o', color='tab:green', label='Val MAE Lon (cm)') - plt.plot(epochs, mae_sum, marker='o', color='tab:orange', label='Val MAE Sum (cm)') - plt.xlabel('Epoch') - plt.ylabel('MAE (cm)') - plt.title('Validation MAE by Epoch (Lat, Lon, Sum)') - plt.legend() - plt.grid(True) - if save_path: - plt.savefig(save_path, bbox_inches='tight') - plt.close() - else: - plt.show() \ No newline at end of file diff --git a/gnss/modeling/__init__.py b/gnss/train/__init__.py similarity index 100% rename from gnss/modeling/__init__.py rename to gnss/train/__init__.py diff --git a/gnss/modeling/train.py b/gnss/train/train.py similarity index 63% rename from gnss/modeling/train.py rename to gnss/train/train.py index 5c4e8c1..3e18285 100644 --- a/gnss/modeling/train.py +++ b/gnss/train/train.py @@ -2,27 +2,29 @@ import pytorch_lightning as pl import torch.nn as nn import numpy as np -from dvclive.lightning import DVCLiveLogger -from gnss.dataset import build_loader -from torch.utils.data import DataLoader from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint -import os -import pandas as pd - -from gnss.model import FullModel -from gnss.model2 import FullModelTwoLayer +from gnss.dataset import build_loader +from gnss.model import JaGuard -class LightningFullModel(pl.LightningModule): - def __init__(self, model_class, in_channels_dict, hidden_dim, metadata, - scalers_tv, scalers_test, - initial_lr, weight_decay_val): +class LightningJaGuard(pl.LightningModule): + def __init__(self, model_class, in_channels_dict, hidden_dim, metadata, + scalers_tv, scalers_test, + initial_lr, weight_decay_val, + 
num_total_sats): super().__init__() self.save_hyperparameters('hidden_dim', 'initial_lr', 'weight_decay_val') - self.model = model_class(in_channels_dict, hidden_dim, metadata) + + self.model = model_class( + in_channels_dict, + hidden_dim, + metadata, + num_total_sats=num_total_sats, + ) + self.loss_fn = nn.SmoothL1Loss(beta=1e-2) - + self.scalers_for_train_val = scalers_tv self.scalers_for_test = scalers_test @@ -35,13 +37,13 @@ def _calculate_loss(self, pred_norm, true_norm): loss_lat = self.loss_fn(lat_p.unsqueeze(1), lat_t.unsqueeze(1)) loss_lon = self.loss_fn(lon_p.unsqueeze(1), lon_t.unsqueeze(1)) return loss_lat + loss_lon - + def training_step(self, batch, batch_idx): pred_norm, true_norm = self(batch) loss = self._calculate_loss(pred_norm, true_norm) self.log('train_loss', loss, on_step=False, on_epoch=True, prog_bar=True, batch_size=1) return loss - + def validation_step(self, batch, batch_idx): pred_norm, true_norm = self(batch) loss = self._calculate_loss(pred_norm, true_norm) @@ -50,17 +52,17 @@ def validation_step(self, batch, batch_idx): targ_scaler = self.scalers_for_train_val['targ'] pred_cm = targ_scaler.inverse_transform(pred_norm.cpu().numpy()) true_cm = targ_scaler.inverse_transform(true_norm.cpu().numpy()) - + mae_lat = np.abs(pred_cm[:, 0] - true_cm[:, 0]).mean() mae_lon = np.abs(pred_cm[:, 1] - true_cm[:, 1]).mean() - diffs = np.sqrt((pred_cm[:, 0] - true_cm[:, 0])**2 + (pred_cm[:, 1] - true_cm[:, 1])**2) + diffs = np.sqrt((pred_cm[:, 0] - true_cm[:, 0])**2 + (pred_cm[:, 1] - true_cm[:, 1])**2) mae_sum = diffs.mean() self.log('val_mae_lat_cm', mae_lat, on_epoch=True, prog_bar=False, batch_size=1) self.log('val_mae_lon_cm', mae_lon, on_epoch=True, prog_bar=False, batch_size=1) - self.log('val_mae_sum_cm', mae_sum, on_epoch=True, prog_bar=True, batch_size=1) + self.log('val_mae_sum_cm', mae_sum, on_epoch=True, prog_bar=True, batch_size=1) return loss - + def test_step(self, batch, batch_idx): pred_norm, true_norm = self(batch) loss = 
self._calculate_loss(pred_norm, true_norm) @@ -69,65 +71,61 @@ def test_step(self, batch, batch_idx): targ_scaler = self.scalers_for_test['targ'] pred_cm = targ_scaler.inverse_transform(pred_norm.cpu().numpy()) true_cm = targ_scaler.inverse_transform(true_norm.cpu().numpy()) - + mae_lat = np.abs(pred_cm[:, 0] - true_cm[:, 0]).mean() mae_lon = np.abs(pred_cm[:, 1] - true_cm[:, 1]).mean() - diffs = np.sqrt((pred_cm[:, 0] - true_cm[:, 0])**2 + (pred_cm[:, 1] - true_cm[:, 1])**2) + diffs = np.sqrt((pred_cm[:, 0] - true_cm[:, 0])**2 + (pred_cm[:, 1] - true_cm[:, 1])**2) mae_sum = diffs.mean() self.log('test_mae_lat_cm', mae_lat, on_epoch=True, prog_bar=False, batch_size=1) self.log('test_mae_lon_cm', mae_lon, on_epoch=True, prog_bar=False, batch_size=1) - self.log('test_mae_sum_cm', mae_sum, on_epoch=True, prog_bar=True, batch_size=1) + self.log('test_mae_sum_cm', mae_sum, on_epoch=True, prog_bar=True, batch_size=1) return loss - - def configure_optimizers(self): - optimizer = torch.optim.Adam(self.parameters(), lr=self.hparams.initial_lr, weight_decay=self.hparams.weight_decay_val) - return optimizer + def configure_optimizers(self): + return torch.optim.Adam( + self.parameters(), + lr=self.hparams.initial_lr, + weight_decay=self.hparams.weight_decay_val + ) def train_lightning_model( - train_signals, val_signals, test_signals, - scalers_for_train_val, scalers_for_test, - metadata, config + train_signals, val_signals, test_signals, + scalers_for_train_val, scalers_for_test, + metadata, config, num_total_sats ): prepare_cfg = config['prepare_data'] - train_cfg = config['train'] - + train_cfg = config['train'] + window_size = prepare_cfg['window_size'] - stride = prepare_cfg['stride'] - - train_loader = build_loader(train_signals, window_size, shuffle=True, stride=stride) - val_loader = build_loader(val_signals, window_size, shuffle=False, stride=stride) - test_loader = build_loader(test_signals, window_size, shuffle=False, stride=stride) - - if train_cfg['model_name'] 
== 'FullModel': - model_class_to_use = FullModel - else: - model_class_to_use = FullModelTwoLayer - - model = LightningFullModel( - model_class=model_class_to_use, + stride = prepare_cfg['stride'] + + train_loader = build_loader(train_signals, window_size, shuffle=True, stride=stride) + val_loader = build_loader(val_signals, window_size, shuffle=False, stride=stride) + test_loader = build_loader(test_signals, window_size, shuffle=False, stride=stride) + + model = LightningJaGuard( + model_class=JaGuard, in_channels_dict={'receiver': 2, 'satellite': 3}, - hidden_dim=train_cfg['hidden_dim'], - metadata=metadata, - scalers_tv=scalers_for_train_val, + hidden_dim=train_cfg['hidden_dim'], + metadata=metadata, + scalers_tv=scalers_for_train_val, scalers_test=scalers_for_test, - initial_lr=train_cfg['initial_lr'], - weight_decay_val=train_cfg['weight_decay_val'] + initial_lr=train_cfg['initial_lr'], + weight_decay_val=train_cfg['weight_decay_val'], + num_total_sats=num_total_sats ) - + early_stop_callback = EarlyStopping(**train_cfg['early_stopping']) - checkpoint_callback = ModelCheckpoint( - dirpath=train_cfg['output_dir'], # Ova putanja dolazi iz 'train' sekcije + dirpath=train_cfg['output_dir'], filename='best_model', save_top_k=1, monitor=train_cfg['early_stopping']['monitor'], mode=train_cfg['early_stopping']['mode'] - ) - + ) trainer = pl.Trainer( max_epochs=train_cfg['n_epochs'], @@ -137,19 +135,11 @@ def train_lightning_model( accelerator=train_cfg.get('accelerator'), devices=train_cfg.get('devices') ) - - trainer.fit(model, train_loader, val_loader) - print("\n--- Training done ---") - print(f"Best model: {checkpoint_callback.best_model_path}") - if checkpoint_callback.best_model_score: - print(f"Best value/metric '{train_cfg['early_stopping']['monitor']}': {checkpoint_callback.best_model_score:.4f}") + trainer.fit(model, train_loader, val_loader) + print(f"\nBest model checkpoint: {checkpoint_callback.best_model_path}") - print("\n--- Testing on test set ---") + 
print("\n--- Testing ---") trainer.test(dataloaders=test_loader, ckpt_path='best', verbose=True) - final_metrics = trainer.callback_metrics - print("\n--- Final metrics on the test set ---") - print(final_metrics) - - return trainer, checkpoint_callback.best_model_path, final_metrics \ No newline at end of file + return trainer, checkpoint_callback.best_model_path, trainer.callback_metrics \ No newline at end of file diff --git a/models/.gitkeep b/models/.gitkeep deleted file mode 100644 index e69de29..0000000 diff --git a/params.yaml b/params.yaml index fe81f34..9e9c875 100644 --- a/params.yaml +++ b/params.yaml @@ -1,41 +1,27 @@ -# ================================================================ -# Glavni parametri eksperimenata -# ================================================================ -receiver_name: Ublox10 -jamming_type: cw -jamming_power: '-50' +jamming_power: '-45' +jamming_type: cw3 +receiver_name: GP01 seed: 42 +output_dir: PARSED_PROCESSED/GP01/cw3/-45/seed_42 -# ================================================================ -# Parametri za prepare_data.py -# ================================================================ prepare_data: - train_ratio: 0.7 - val_ratio: 0.15 - + train_val_ratio: 0.80 + val_split_ratio: 0.10 window_size: 10 stride: 10 -# ================================================================ -# Parametri za train.py -# ================================================================ train: - model_name: "FullModel" - n_epochs: 40 - hidden_dim: 128 - - # optimizator - initial_lr: 0.0001 + accelerator: cuda + devices: 1 + hidden_dim: 256 + initial_lr: 0.001 weight_decay_val: 0.01 - - # Early Stopping + n_epochs: 100 + model_name: JaGuard + output_dir: PARSED_REPORTS/GP01/cw3/-45/seed_42 early_stopping: monitor: val_loss - patience: 5 - mode: 'min' + mode: min + patience: 10 min_delta: 0.001 - verbose: True - - accelerator: "cuda" - devices: 1 - + verbose: true \ No newline at end of file diff --git a/prepare_data.py 
b/prepare_data.py index 55b25dd..1834aa2 100644 --- a/prepare_data.py +++ b/prepare_data.py @@ -5,217 +5,126 @@ import random from pathlib import Path from gnss import dataset -from gnss.features import normalize_with_scalers from gnss.config import PARSED_DATA_DIR -# (1) Load parameters: -with open('params.yaml', 'r') as f: - params = yaml.safe_load(f) - -# Read exp group parameters -receiver_name = params['receiver_name'] -jamming_type = params['jamming_type'] -jamming_power = params['jamming_power'] -seed = params['seed'] -window_size = params['prepare_data']['window_size'] -stride = params['prepare_data']['stride'] -train_ratio = params['prepare_data'].get('train_ratio', 0.7) -val_ratio = params['prepare_data'].get('val_ratio', 0.15) -assert train_ratio + val_ratio < 1.0, "train_ratio + val_ratio must be < 1.0" - -# (2) Set up input/output dirs: - -input_dir = os.path.join(PARSED_DATA_DIR, str(receiver_name), str(jamming_type), str(jamming_power)) -output_dir = params.get("output_dir") -if not output_dir: - raise ValueError("No global 'output_dir' found in params.yaml!") -os.makedirs(output_dir, exist_ok=True) -print(f" INFO - Using input directory: {input_dir}") -print(f" INFO - Saving outputs to: {output_dir}") - - -# (2) Discover measurement files (one per ts): - def get_measurement_definitions(base_path): measurement_definitions = [] base_path = Path(base_path) - print(f" DEBUG - Scanning for measurement folders in: {base_path}") + print(f"INFO - Scanning folder: {base_path}") + if not base_path.exists(): + return [] for subdir in sorted(base_path.iterdir()): if subdir.is_dir() and subdir.name.startswith('R'): sats_file = subdir / 'sats_data.csv' - rec_file = subdir / 'reciever_data.csv' + rec_file = subdir / 'reciever_data.csv' if sats_file.exists() and rec_file.exists(): measurement_definitions.append({ - "id": subdir.name, - "sats": str(sats_file), + "id": subdir.name, + "sats": str(sats_file), "receiver": str(rec_file), }) - print(f" DEBUG - Found 
measurement: {subdir.name} | sats: {sats_file} | rec: {rec_file}") - else: - print(f" WARNING - Missing sats or receiver file in {subdir}") - print(f" INFO -Found {len(measurement_definitions)} measurement pairs in {base_path}") return measurement_definitions +with open('params.yaml', 'r') as f: + params = yaml.safe_load(f) + +receiver_name = params['receiver_name'] +jamming_type = params['jamming_type'] +jamming_power = params['jamming_power'] +seed = params['seed'] + +train_val_ratio = params['prepare_data'].get('train_val_ratio', 0.80) +val_split_ratio = params['prepare_data'].get('val_split_ratio', 0.10) + +input_dir = os.path.join(PARSED_DATA_DIR, str(receiver_name), str(jamming_type), str(jamming_power)) +output_dir = params.get("output_dir", "processed_data") +os.makedirs(output_dir, exist_ok=True) + measurement_defs = get_measurement_definitions(input_dir) if not measurement_defs: - print(f" WARNING - No valid measurement pairs found in {input_dir}.") - exit(0) - -# (3) Split: + print(f"ERROR - No data found in {input_dir}") + exit(1) random.seed(seed) random.shuffle(measurement_defs) n = len(measurement_defs) -n_train = int(train_ratio * n) -n_val = int(val_ratio * n) +n_train_val = int(train_val_ratio * n) +n_val = int(val_split_ratio * n_train_val) +n_train = n_train_val - n_val train_defs = measurement_defs[:n_train] -val_defs = measurement_defs[n_train : n_train + n_val] -test_defs = measurement_defs[n_train + n_val :] -print(f"Split: {len(train_defs)} train, {len(val_defs)} val, {len(test_defs)} test time series") -print("Train IDs:", [d['id'] for d in train_defs]) -print("Val IDs:", [d['id'] for d in val_defs]) -print("Test IDs:", [d['id'] for d in test_defs]) +val_defs = measurement_defs[n_train : n_train_val] +test_defs = measurement_defs[n_train_val :] +print(f"Total files: {n}") +print(f"Split: {len(train_defs)} train, {len(val_defs)} val, {len(test_defs)} test") -######################################################################### 
+print("\n Phase 1: Processing train and validation data ") +train_measurements = dataset.load_all_measurements(train_defs) +val_measurements = dataset.load_all_measurements(val_defs) + +print("\n Phase 2: Processing test data ") +if test_defs: + test_measurements = dataset.load_all_measurements(test_defs) +else: + print("WARNING - No test data found.") + test_measurements = [] -print("\n--- FAZA 1: Obrada Trening i Validacijskih podataka ---") +print("INFO - Computing total number of unique satellites...") +all_sids = set() +for m in train_measurements + val_measurements + test_measurements: + for ts_sids in m['satellite_s_ids']: + all_sids.update(ts_sids['satellite_s_ids'].tolist()) -print(" INFO - Učitavanje trening i validacijskih mjerenja...") -train_measurements = dataset.load_all_measurements(train_defs) -val_measurements = dataset.load_all_measurements(val_defs) +num_total_sats = int(max(all_sids) + 1) if all_sids else 20 +print(f"INFO - Found {len(all_sids)} unique satellites. num_total_sats = {num_total_sats}") -print(" INFO - Fitanje 'train' scalera na trening podacima") -agg_train_rec_feats, agg_train_sat_feats, agg_train_targ_rec = dataset.aggregate_for_normalization(train_measurements) -if not all(len(x) > 0 for x in [agg_train_rec_feats, agg_train_sat_feats, agg_train_targ_rec]): - raise ValueError("Nema podataka za fitanje trening scalera.") -agg_train_rec_feats_2d = np.vstack(agg_train_rec_feats) -agg_train_sat_feats_2d = np.vstack(agg_train_sat_feats) if agg_train_sat_feats else np.empty((0,3)) -agg_train_targ_rec_2d = np.vstack(agg_train_targ_rec) +agg_rec, agg_sat, agg_targ = dataset.aggregate_for_normalization(train_measurements) + +if agg_rec.size == 0 or agg_sat.size == 0: + raise ValueError("Not enough data to fit scalers — check training split.") rec_scaler_train, sat_scaler_train, targ_scaler_train = dataset.fit_standard_scalers( - agg_train_rec_feats_2d, agg_train_sat_feats_2d, agg_train_targ_rec_2d + agg_rec, agg_sat, agg_targ ) 
-print("INFO - Normalizacija trening i validacijskih podataka...") -normalized_train_measurements = normalize_with_scalers(train_measurements, rec_scaler_train, sat_scaler_train, targ_scaler_train) -normalized_val_measurements = normalize_with_scalers(val_measurements, rec_scaler_train, sat_scaler_train, targ_scaler_train) - -train_signals = dataset.create_signals(normalized_train_measurements) -val_signals = dataset.create_signals(normalized_val_measurements) -#################################################################### +train_measurements = dataset.normalize_with_scalers( + train_measurements, rec_scaler_train, sat_scaler_train, targ_scaler_train) -print("\n--- FAZA 2: Obrada Testnih podataka ---") +val_measurements = dataset.normalize_with_scalers( + val_measurements, rec_scaler_train, sat_scaler_train, targ_scaler_train) -print(" INFO - Učitavanje testnih mjerenja...") -test_measurements = dataset.load_all_measurements(test_defs) +train_signals = dataset.create_signals(train_measurements) +val_signals = dataset.create_signals(val_measurements) if test_measurements: - print(" INFO - Fitanje 'test' scalera na testnim podacima...") - agg_test_rec_feats, agg_test_sat_feats, agg_test_targ_rec = dataset.aggregate_for_normalization(test_measurements) - if not all(len(x) > 0 for x in [agg_test_rec_feats, agg_test_sat_feats, agg_test_targ_rec]): - raise ValueError("Nema podataka za fitanje test scalera.") - - agg_test_rec_feats_2d = np.vstack(agg_test_rec_feats) - agg_test_sat_feats_2d = np.vstack(agg_test_sat_feats) if agg_test_sat_feats else np.empty((0,3)) - agg_test_targ_rec_2d = np.vstack(agg_test_targ_rec) - + agg_rec_t, agg_sat_t, agg_targ_t = dataset.aggregate_for_normalization(test_measurements) rec_scaler_test, sat_scaler_test, targ_scaler_test = dataset.fit_standard_scalers( - agg_test_rec_feats_2d, agg_test_sat_feats_2d, agg_test_targ_rec_2d - ) - - print(" INFO - Normalizacija testnih podataka...") - normalized_test_measurements = 
normalize_with_scalers(test_measurements, rec_scaler_test, sat_scaler_test, targ_scaler_test) - test_signals = dataset.create_signals(normalized_test_measurements) + agg_rec_t, agg_sat_t, agg_targ_t) + test_measurements = dataset.normalize_with_scalers( + test_measurements, rec_scaler_test, sat_scaler_test, targ_scaler_test) + test_signals = dataset.create_signals(test_measurements) else: - print(" WARNIGN - Nema testnih podataka za obradu. Testni skup će biti prazan.") - test_signals = [] - rec_scaler_test, sat_scaler_test, targ_scaler_test = None, None, None - + test_signals = [] + rec_scaler_test = None -print(f"\nINFO - Spremanje svih podataka i scalera u: {output_dir}") +print(f"\nSaving processed data to: {output_dir}") -torch.save(train_signals, os.path.join(output_dir, 'train_graphs.pt')) -torch.save(val_signals, os.path.join(output_dir, 'val_graphs.pt')) -torch.save(test_signals, os.path.join(output_dir, 'test_graphs.pt')) +torch.save(train_signals, os.path.join(output_dir, 'train_graphs.pt')) +torch.save(val_signals, os.path.join(output_dir, 'val_graphs.pt')) +torch.save(test_signals, os.path.join(output_dir, 'test_graphs.pt')) +torch.save(num_total_sats, os.path.join(output_dir, 'num_total_sats.pt')) -print("INFO - Spremanje 'train' seta scalera...") -torch.save(rec_scaler_train, os.path.join(output_dir, 'rec_scaler_train.pt')) -torch.save(sat_scaler_train, os.path.join(output_dir, 'sat_scaler_train.pt')) +torch.save(rec_scaler_train, os.path.join(output_dir, 'rec_scaler_train.pt')) +torch.save(sat_scaler_train, os.path.join(output_dir, 'sat_scaler_train.pt')) torch.save(targ_scaler_train, os.path.join(output_dir, 'targ_scaler_train.pt')) if rec_scaler_test: - print("INFO - Spremanje 'test' seta scalera...") - torch.save(rec_scaler_test, os.path.join(output_dir, 'rec_scaler_test.pt')) - torch.save(sat_scaler_test, os.path.join(output_dir, 'sat_scaler_test.pt')) + torch.save(rec_scaler_test, os.path.join(output_dir, 'rec_scaler_test.pt')) + 
torch.save(sat_scaler_test, os.path.join(output_dir, 'sat_scaler_test.pt')) torch.save(targ_scaler_test, os.path.join(output_dir, 'targ_scaler_test.pt')) -print("\nPriprema podataka je završena.") - - -''' -# (4) Load and process train measurements only for scaler fitting -train_measurements = dataset.load_all_measurements(train_defs, window_size) -val_measurements = dataset.load_all_measurements(val_defs, window_size) -test_measurements = dataset.load_all_measurements(test_defs, window_size) - -agg_train_rec_feats, agg_train_sat_feats, agg_train_targ_rec = dataset.aggregate_for_normalization(train_measurements) -if len(agg_train_rec_feats) == 0 or len(agg_train_sat_feats) == 0 or len(agg_train_targ_rec) == 0: - print(f"[WARNING] No valid features found for training in {input_dir}. Skipping.") - exit(0) - -try: - # Receiver features: stack to [N, F] - agg_train_rec_feats_2d = np.vstack(agg_train_rec_feats) -except Exception as e: - print(f"WARNING- Could not stack receiver features: {e}. Skipping.") - exit(0) - -# Satellite features: stack all non-empty, check all have isti broj feature-a -non_empty_sat_feats = [fs for fs in agg_train_sat_feats if fs.size > 0] -if non_empty_sat_feats: - n_features = non_empty_sat_feats[0].shape[1] - if not all(fs.shape[1] == n_features for fs in non_empty_sat_feats): - print(f"WARNING - Inconsistent satellite feature dimensions (columns) in {input_dir}. Skipping.") - exit(0) - agg_train_sat_feats_2d = np.vstack(non_empty_sat_feats) -else: - n_features = agg_train_rec_feats_2d.shape[1] if agg_train_rec_feats_2d.size > 0 else 0 - agg_train_sat_feats_2d = np.empty((0, n_features)) - -try: - agg_train_targ_rec_2d = np.vstack(agg_train_targ_rec) -except Exception as e: - print(f"WARNING - Could not stack target features: {e}. 
Skipping.") - exit(0) - -# (5) Fit scalers on train set - -rec_scaler, sat_scaler, targ_scaler = dataset.fit_standard_scalers( - agg_train_rec_feats_2d, agg_train_sat_feats_2d, agg_train_targ_rec_2d) - -# (6) Normalize sve skupove koristeci train scalere -normalized_train_measurements = normalize_with_scalers(train_measurements, rec_scaler, sat_scaler, targ_scaler) -normalized_val_measurements = normalize_with_scalers(val_measurements, rec_scaler, sat_scaler, targ_scaler) -normalized_test_measurements = normalize_with_scalers(test_measurements, rec_scaler, sat_scaler, targ_scaler) - -# (7) Convert to graph objects (signals) -train_signals = dataset.create_signals(normalized_train_measurements) -val_signals = dataset.create_signals(normalized_val_measurements) -test_signals = dataset.create_signals(normalized_test_measurements) - -# (8) Save outputs -print(f"Saving {len(train_signals)} train, {len(val_signals)} val, {len(test_signals)} test graphs to {output_dir}") -torch.save(train_signals, os.path.join(output_dir, 'train_graphs.pt')) -torch.save(val_signals, os.path.join(output_dir, 'val_graphs.pt')) -torch.save(test_signals, os.path.join(output_dir, 'test_graphs.pt')) -torch.save(rec_scaler, os.path.join(output_dir, 'rec_scaler.pt')) -torch.save(sat_scaler, os.path.join(output_dir, 'sat_scaler.pt')) -torch.save(targ_scaler, os.path.join(output_dir, 'targ_scaler.pt')) - -print("Data preparation complete.") -''' \ No newline at end of file +print("\nData preparation complete.") \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml deleted file mode 100644 index 3d3a625..0000000 --- a/pyproject.toml +++ /dev/null @@ -1,32 +0,0 @@ -[build-system] -requires = ["flit_core >=3.2,<4"] -build-backend = "flit_core.buildapi" - -[project] -name = "gnss" -version = "0.0.1" -description = "A short description of the project." 
-authors = [ - { name = "Your name (or your organization/company/team)" }, -] -license = { file = "LICENSE" } -readme = "README.md" -classifiers = [ - "Programming Language :: Python :: 3", - "License :: OSI Approved :: MIT License" -] -requires-python = "~=3.10.0" - - -[tool.ruff] -line-length = 99 -src = ["gnss"] -include = ["pyproject.toml", "gnss/**/*.py"] - -[tool.ruff.lint] -extend-select = ["I"] # Add import sorting - -[tool.ruff.lint.isort] -known-first-party = ["gnss"] -force-sort-within-sections = true - diff --git a/references/.gitkeep b/references/.gitkeep deleted file mode 100644 index e69de29..0000000 diff --git a/reports/.gitkeep b/reports/.gitkeep deleted file mode 100644 index e69de29..0000000 diff --git a/reports/figures/.gitkeep b/reports/figures/.gitkeep deleted file mode 100644 index e69de29..0000000 diff --git a/run_all_experiments.py b/run_all_experiments.py index 8217772..975d4cc 100644 --- a/run_all_experiments.py +++ b/run_all_experiments.py @@ -3,138 +3,166 @@ import sys from pathlib import Path import yaml -import itertools +import itertools -DATA_ROOT = Path("~/shared/Ivana_GNN/Sateliti/GNSSGraphDetect/data/parsed").expanduser() PARAMS_FILE = Path("params.yaml") -EXCLUDED_JAMMING_TYPES = {"none", ".ipynb_checkpoints"} -EXCLUDED_RECEIVERS = {"Ublox6", ".ipynb_checkpoints"} + +EXCLUDED_RECEIVERS = {"Ublox6"} +EXCLUDED_JAMMING_TYPES = {"none"} + +REPORTS_ROOT = "PARSED_REPORTS" +PROCESSED_ROOT = "PARSED_PROCESSED" + def _split(comma_separated: str | None) -> set | None: return set(comma_separated.split(",")) if comma_separated else None + def parse_args() -> argparse.Namespace: - p = argparse.ArgumentParser(description="Run GNSS-Graph-Detect experiments.", allow_abbrev=False) - p.add_argument("-r", "--receivers", help="Comma-separated list of receivers to INCLUDE") - p.add_argument("-j", "--jams", help="Comma-separated list of jamming types to INCLUDE") - p.add_argument("-p", "--powers", help="Comma-separated list of jamming power folders 
to INCLUDE") - p.add_argument("--seeds", help="Comma-separated list of seeds (default: 42,145,156,5,85)") - p.add_argument("--dry-run", action="store_true", help="Print planned experiments and exit") + p = argparse.ArgumentParser( + description="JaGuard: Automated Experimental Sweep", + allow_abbrev=False + ) + p.add_argument( + "--data-root", + default="gnss/data/parsed", + help="Path to root data directory (default: gnss/data/parsed)" + ) + p.add_argument( + "-r", "--receivers", + help="Comma-separated list of receivers (e.g., Ublox10,GP01)" + ) + p.add_argument( + "--seeds", + help="Comma-separated list of seeds (default: 42,789,1011,1263,2024)" + ) + p.add_argument( + "--dry-run", + action="store_true", + help="Print experiment matrix without executing" + ) return p.parse_args() -def discover_experiments(root: Path, want_receivers: set, want_jams: set, want_powers: set) -> list[dict]: - ''' - Prolazi kroz direktorije i pronalazi sve validne kombinacije - receiver/jamming_type/power. - ''' + +def discover_experiments(root: Path, want_receivers: set) -> list[dict]: + """ + Locates all receiver, jamming_type, and jamming_power combinations. 
+ Assumes structure: DATA_ROOT / receiver / jamming_type / jamming_power + """ experiments = [] + if not root.exists(): + print(f"ERROR: DATA_ROOT {root} not found!") + return experiments + for receiver_dir in root.iterdir(): if not receiver_dir.is_dir() or receiver_dir.name in EXCLUDED_RECEIVERS: continue if want_receivers and receiver_dir.name not in want_receivers: continue - for jam_dir in receiver_dir.iterdir(): - if not jam_dir.is_dir() or jam_dir.name in EXCLUDED_JAMMING_TYPES: - continue - if want_jams and jam_dir.name not in want_jams: + for jamming_dir in receiver_dir.iterdir(): + if not jamming_dir.is_dir() or jamming_dir.name in EXCLUDED_JAMMING_TYPES: continue - for power_dir in jam_dir.iterdir(): + for power_dir in jamming_dir.iterdir(): if not power_dir.is_dir(): continue - if want_powers and power_dir.name not in want_powers: - continue - + experiments.append({ - "receiver": receiver_dir.name, - "jamming_type": jam_dir.name, - "power": power_dir.name + "receiver": receiver_dir.name, + "jamming_type": jamming_dir.name, + "jamming_power": power_dir.name }) + return experiments + def run_single_experiment(exp_params: dict, seed: int, dry_run: bool) -> bool: - ''' - Ažurira params.yaml i pokreće 'dvc repro' za jedan eksperiment - Vraća True ako je uspješno, False ako nije. - ''' - exp_name = f"exp_{exp_params['receiver']}_{exp_params['jamming_type']}_{exp_params['power']}_seed{seed}".replace("-", "m") - print(f"> Running: {exp_name}") - + """ + Updates params.yaml and executes the DVC pipeline for a single configuration. 
+ """ + rec = exp_params['receiver'] + jt = exp_params['jamming_type'] + jp = exp_params['jamming_power'] + exp_id = f"{rec}_{jt}_{jp}_seed{seed}" + + print(f"\n>>> INITIATING EXPERIMENT: {exp_id}") + if dry_run: return True - # Update params.yaml try: with open(PARAMS_FILE, "r") as f: params = yaml.safe_load(f) except FileNotFoundError: - print(f"ERROR: {PARAMS_FILE} not found!", file=sys.stderr) + print(f"ERROR: {PARAMS_FILE} missing!", file=sys.stderr) return False - params["receiver_name"] = exp_params["receiver"] - params["jamming_type"] = exp_params["jamming_type"] - params["jamming_power"] = exp_params["power"] - params["seed"] = seed + params["receiver_name"] = rec + params["jamming_type"] = jt + params["jamming_power"] = jp + params["seed"] = seed + + if "train" not in params: + params["train"] = {} - if "train" in params: - params["train"]["output_dir"] = f"reports/{exp_params['receiver']}/{exp_params['jamming_type']}/{exp_params['power']}/seed_{seed}" - params["output_dir"] = f"data/processed/{exp_params['receiver']}/{exp_params['jamming_type']}/{exp_params['power']}/seed_{seed}" + rel_path = f"{rec}/{jt}/{jp}/seed_{seed}" + params["output_dir"] = f"{PROCESSED_ROOT}/{rel_path}" + params["train"]["output_dir"] = f"{REPORTS_ROOT}/{rel_path}" with open(PARAMS_FILE, "w") as f: yaml.dump(params, f) - # Run DVC pipeline : res = subprocess.run(["dvc", "repro"], check=False) - #res = subprocess.run(["dvc", "repro"], check=False, capture_output=True, text=True) + if res.returncode != 0: - print(f"! Experiment {exp_name} FAILED", file=sys.stderr) - print("--- DVC STDOUT ---", file=sys.stderr) - print(res.stdout, file=sys.stderr) - print("--- DVC STDERR ---", file=sys.stderr) - print(res.stderr, file=sys.stderr) + print(f"! 
CRITICAL: Experiment {exp_id} FAILED during DVC execution", file=sys.stderr) return False - + return True + def main() -> None: ns = parse_args() + + DATA_ROOT = Path(ns.data_root).expanduser() want_receivers = _split(ns.receivers) - want_jams = _split(ns.jams) - want_powers = _split(ns.powers) - seeds = [int(s.strip()) for s in ns.seeds.split(",")] if ns.seeds else [42, 145, 156, 5, 85] + seeds_str = ns.seeds if ns.seeds else "42,789,1011,1263,2024" + seeds = [int(s.strip()) for s in seeds_str.split(",")] + + print("--- Discovering target configurations ---") + experiments_to_run = discover_experiments(DATA_ROOT, want_receivers) - print("--- Discovering experiments ---") - experiments_to_run = discover_experiments(DATA_ROOT, want_receivers, want_jams, want_powers) - if not experiments_to_run: - print("No experiment combinations found for the selected filters.") + print("No valid receiver/jamming configurations found.") return - # Spajamo eksperimente i seedove u jednu listu all_combinations = list(itertools.product(experiments_to_run, seeds)) - print(f"Found {len(experiments_to_run)} experiment configurations, running for {len(seeds)} seeds.") - print(f"Total experiments to run: {len(all_combinations)}") + print(f"Configurations discovered: {len(experiments_to_run)}") + print(f"Seeds to test: {seeds}") + print(f"Total pipeline runs scheduled: {len(all_combinations)}") all_failures = [] + for exp_params, seed in all_combinations: success = run_single_experiment(exp_params, seed, ns.dry_run) - if not success: - exp_name = f"{exp_params['receiver']}_{exp_params['jamming_type']}_{exp_params['power']}_seed{seed}" - all_failures.append(exp_name) + if not ns.dry_run and not success: + rec = exp_params['receiver'] + jt = exp_params['jamming_type'] + jp = exp_params['jamming_power'] + all_failures.append(f"{rec}_{jt}_{jp}_seed{seed}") - # Ispis rezultata if ns.dry_run: - print("\n(Dry-run complete - no experiments executed.)") + print("\n(Dry-run complete. 
No changes made.)") return - + if all_failures: - print("\n--- Some experiments failed: ---") + print("\n--- EXPERIMENT FAILURES ---") for name in all_failures: print(f" - {name}") else: - print("\n--- All selected experiments completed successfully!---") + print("\n--- ALL EXPERIMENTS COMPLETED SUCCESSFULLY! ---") + if __name__ == "__main__": - main() - \ No newline at end of file + main() \ No newline at end of file diff --git a/run_experiment.py b/run_experiment.py index c6c4092..9a07272 100644 --- a/run_experiment.py +++ b/run_experiment.py @@ -1,15 +1,15 @@ -import numpy as np -import pandas as pd import yaml import os import random import torch +import numpy as np +import time from pathlib import Path -import pickle -from gnss.modeling.train import train_lightning_model + +from gnss.train.train import train_lightning_model from dvclive.lightning import DVCLiveLogger -from gnss.config import PARSED_DATA_DIR -#torch.set_float32_matmul_precision('high') + +torch.set_float32_matmul_precision('high') def main(): @@ -23,66 +23,72 @@ def main(): if torch.cuda.is_available(): torch.cuda.manual_seed_all(seed) - output_dir = Path(params['train']['output_dir']) + output_dir = Path(params['train']['output_dir']) data_out_dir = Path(params['output_dir']) - print(f"Loading data from: {data_out_dir}") - train_signals = torch.load(data_out_dir / 'train_graphs.pt') - val_signals = torch.load(data_out_dir / 'val_graphs.pt') - test_signals = torch.load(data_out_dir / 'test_graphs.pt') - - print("Učitavanje dva seta scalera...") + train_signals = torch.load(data_out_dir / 'train_graphs.pt', weights_only=False) + val_signals = torch.load(data_out_dir / 'val_graphs.pt', weights_only=False) + test_signals = torch.load(data_out_dir / 'test_graphs.pt', weights_only=False) + scalers_for_train_val = { - 'rec':
torch.load(data_out_dir / 'rec_scaler_train.pt', weights_only=False), + 'sat': torch.load(data_out_dir / 'sat_scaler_train.pt', weights_only=False), + 'targ': torch.load(data_out_dir / 'targ_scaler_train.pt', weights_only=False) } scalers_for_test = { - 'rec': torch.load(data_out_dir / 'rec_scaler_test.pt'), - 'sat': torch.load(data_out_dir / 'sat_scaler_test.pt'), - 'targ': torch.load(data_out_dir / 'targ_scaler_test.pt') + 'rec': torch.load(data_out_dir / 'rec_scaler_test.pt', weights_only=False), + 'sat': torch.load(data_out_dir / 'sat_scaler_test.pt', weights_only=False), + 'targ': torch.load(data_out_dir / 'targ_scaler_test.pt', weights_only=False) } + num_total_sats = torch.load(data_out_dir / 'num_total_sats.pt', weights_only=False) + + # Extract graph metadata from first training snapshot metadata = train_signals[0][0].metadata() if train_signals and train_signals[0] else None - + if not metadata: + raise ValueError("Could not extract metadata from training signals.") + logger = DVCLiveLogger( dir=str(output_dir / "dvclive"), save_dvc_exp=False, dvcyaml=False ) - config = { - **params, + **params, 'output_dir': str(output_dir), 'logger': logger } + + print("\nINFO - Starting training...") + start_time = time.time() + trainer, best_model_path, best_metrics = train_lightning_model( - train_signals, - val_signals, - test_signals, - scalers_for_train_val, - scalers_for_test, - metadata, - config + train_signals, val_signals, test_signals, + scalers_for_train_val, scalers_for_test, + metadata, config, num_total_sats=num_total_sats ) + if hasattr(logger, "finalize"): logger.finalize("success") if best_metrics: print("\n--- Final metrics on test set ---") - metrics_to_save = {k: v.item() if isinstance(v, torch.Tensor) else v for k, v in best_metrics.items()} + metrics_to_save = { + k: v.item() if isinstance(v, torch.Tensor) else v + for k, v in best_metrics.items() + } print(yaml.dump(metrics_to_save, indent=2)) - with open(output_dir / 'metrics.yaml', 'w') as f: 
yaml.dump(metrics_to_save, f) - + if best_model_path: - print(f"\nBest model saved on: {best_model_path}") - - print(f"\nLogs saved on: {output_dir}") + print(f"\nBest model saved at: {best_model_path}") + + print(f"\nLogs saved to: {output_dir}") + if __name__ == '__main__': - main() + main() \ No newline at end of file From 4fd6ab3275af103723daf1463be072c46bb71359 Mon Sep 17 00:00:00 2001 From: IkaKes <166716424+IkaKes@users.noreply.github.com> Date: Wed, 18 Mar 2026 17:33:16 +0100 Subject: [PATCH 05/11] Update requirements.txt --- requirements.txt | 41 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 39 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index ff92702..a772c63 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,7 +15,44 @@ pytest python-dotenv #scikit-learn tqdm -typer +typer--extra-index-url https://download.pytorch.org/whl/cu124 +--find-links https://data.pyg.org/whl/torch-2.4.0+cu124.html + +ipython +jupyterlab +loguru +matplotlib +mkdocs +notebook +#numpy==1.24.4 +#scipy +#pandas==1.5.3 +#pip +pytest +python-dotenv +#scikit-learn + +# PyTorch core +torch==2.4.0+cu124 +torchvision==0.19.0+cu124 +torchaudio==2.4.0+cu124 + +# —–– PyG low-level CUDA kernels: +pyg_lib==0.4.0+pt24cu124 +torch_scatter==2.1.2+pt24cu124 +torch_sparse==0.6.18+pt24cu124 +torch_cluster==1.6.3+pt24cu124 +torch_spline_conv==1.2.2+pt24cu124 + +# —–– PyG high-level: +torch-geometric==2.3.1 +torch-geometric-temporal==0.56.0 +pytorch-lightning==2.2.4 +# DVC for experiment tracking +dvc +dvclive +# YAML parsing +pyyaml -e . 
# PyTorch core @@ -38,4 +75,4 @@ pytorch-lightning==2.2.4 dvc dvclive # YAML parsing -pyyaml \ No newline at end of file +pyyaml From a8795077deca5202d69c36b5dd89d28905deebcb Mon Sep 17 00:00:00 2001 From: IkaKes <166716424+IkaKes@users.noreply.github.com> Date: Wed, 18 Mar 2026 17:58:02 +0100 Subject: [PATCH 06/11] Update README --- README.md | 144 +++++++++++++++++++++++++++++++----------------------- 1 file changed, 84 insertions(+), 60 deletions(-) diff --git a/README.md b/README.md index 0614b01..d4a0029 100644 --- a/README.md +++ b/README.md @@ -1,61 +1,85 @@ -# GNSS - - - - - -A short description of the project. - -## Project Organization - -``` -├── LICENSE <- Open-source license if one is chosen -├── Makefile <- Makefile with convenience commands like `make data` or `make train` -├── README.md <- The top-level README for developers using this project. -├── data -│ ├── external <- Data from third party sources. -│ ├── interim <- Intermediate data that has been transformed. -│ ├── processed <- The final, canonical data sets for modeling. -│ └── raw <- The original, immutable data dump. -│ -├── docs <- A default mkdocs project; see www.mkdocs.org for details -│ -├── models <- Trained and serialized models, model predictions, or model summaries -│ -├── notebooks <- Jupyter notebooks. Naming convention is a number (for ordering), -│ the creator's initials, and a short `-` delimited description, e.g. -│ `1.0-jqp-initial-data-exploration`. -│ -├── pyproject.toml <- Project configuration file with package metadata for -│ gnss and configuration for tools like black -│ -├── references <- Data dictionaries, manuals, and all other explanatory materials. -│ -├── reports <- Generated analysis as HTML, PDF, LaTeX, etc. -│ └── figures <- Generated graphics and figures to be used in reporting -│ -├── requirements.txt <- The requirements file for reproducing the analysis environment, e.g. 
-│ generated with `pip freeze > requirements.txt` -│ -├── setup.cfg <- Configuration file for flake8 -│ -└── gnss <- Source code for use in this project. - │ - ├── __init__.py <- Makes gnss a Python module - │ - ├── config.py <- Store useful variables and configuration - │ - ├── dataset.py <- Scripts to download or generate data - │ - ├── features.py <- Code to create features for modeling - │ - ├── modeling - │ ├── __init__.py - │ ├── predict.py <- Code to run model inference with trained models - │ └── train.py <- Code to train models - │ - └── plots.py <- Code to create visualizations -``` - --------- +# JaGuard: Jamming Correction of GNSS Deviation with Deep Temporal Graphs + +

+ Receiver-centric deep temporal graph learning for GNSS jamming mitigation +

+ +

+ + + + +

+ +--- + +## Overview + +[cite_start]**JaGuard (Jamming Guardian)** is the first deep temporal graph neural network designed to estimate and correct jamming-induced positional drift in GNSS systems. + +[cite_start]Unlike reactive anomaly detection, JaGuard reformulates mitigation as a **dynamic graph regression problem**. [cite_start]It models the satellite-receiver constellation as a sequence of heterogeneous star graphs, capturing the physical deterioration of the signal over time[cite: 9, 16, 120]. + +### Key Features +**Dynamic Star Graph:** Models the receiver as a central node and visible satellites as leaf nodes. +**Spatiotemporal Fusion:** Uses a **HeteroGCLSTM** layer to process 10-second windows of signal history. +**Minimalist Input:** Operates exclusively on standard NMEA observables (SNR, Azimuth, Elevation; Latitude and Longitude). +**High Resilience:** Maintains centimeter-level accuracy even under severe -45 dBm jamming and data starvation. + + + +--- + +## Project Structure + + +. +├── gnss/ # Core library +│ ├── train/ # Training logic and LSTM gate definitions +│ ├── dataset.py # Graph construction, sliding windows & normalization +│ └── model.py # JaGuard architecture (HeteroGCLSTM) +├── params.yaml # Central experiment configuration +├── prepare_data.py # Data preprocessing (NMEA → Z-score normalized graphs) +├── run_experiment.py # Execution for a single configuration/seed +├── run_all_experiments.py # Master script for automated experimental sweeps +├── dvc.yaml # DVC pipeline orchestration +└── README.md + +## Installation + +Make sure you have [Conda](https://docs.conda.io/en/latest/) installed: + +# 1. Create environment +conda create --solver classic -n gnss-py310 \ + python=3.10 \ + numpy=1.24.4 \ + scipy=1.15.2 \ + pandas=1.3.5 \ + scikit-learn \ + -c conda-forge -y + +# 2. Activate environment +source $(conda info --base)/etc/profile.d/conda.sh +conda activate gnss-py310 + +# 3. 
Install remaining dependencies +pip install -r requirements.txt + + +## Automated Pipeline + +This project is fully instrumented with Data Version Control (DVC) to ensure reproducibility. To simplify the research workflow, we use an automated sweep script that manages parameter updates and triggers the DVC pipeline internally. +To automate the evaluation across all discovered receivers, jamming types, and power levels, use the run_all_experiments.py script. This script automatically updates params.yaml for each configuration and executes dvc repro for you. +# Run the full sweep with default settings +python run_all_experiments.py + +# Optional: Run a dry-run to see the experiment matrix without executing +python run_all_experiments.py --dry-run + +# Optional: Filter by specific receivers or define custom seeds +python run_all_experiments.py --receivers Ublox10,GP01 --seeds 42,2024 + +## Citation + + + From d69c9f49f5f164c2d0b834058a7c28445c1785fe Mon Sep 17 00:00:00 2001 From: IkaKes <166716424+IkaKes@users.noreply.github.com> Date: Wed, 18 Mar 2026 17:59:44 +0100 Subject: [PATCH 07/11] Updated README Updated formatting and headings in README.md for clarity and consistency. --- README.md | 26 ++++++-------------------- 1 file changed, 6 insertions(+), 20 deletions(-) diff --git a/README.md b/README.md index d4a0029..5cb7205 100644 --- a/README.md +++ b/README.md @@ -1,17 +1,5 @@ # JaGuard: Jamming Correction of GNSS Deviation with Deep Temporal Graphs -

- Receiver-centric deep temporal graph learning for GNSS jamming mitigation -

- -

- - - - -

- ---- ## Overview @@ -27,8 +15,6 @@ ---- - ## Project Structure @@ -48,7 +34,7 @@ Make sure you have [Conda](https://docs.conda.io/en/latest/) installed: -# 1. Create environment +### 1. Create environment conda create --solver classic -n gnss-py310 \ python=3.10 \ numpy=1.24.4 \ @@ -57,11 +43,11 @@ conda create --solver classic -n gnss-py310 \ scikit-learn \ -c conda-forge -y -# 2. Activate environment +### 2. Activate environment source $(conda info --base)/etc/profile.d/conda.sh conda activate gnss-py310 -# 3. Install remaining dependencies +### 3. Install remaining dependencies pip install -r requirements.txt @@ -69,13 +55,13 @@ pip install -r requirements.txt This project is fully instrumented with Data Version Control (DVC) to ensure reproducibility. To simplify the research workflow, we use an automated sweep script that manages parameter updates and triggers the DVC pipeline internally. To automate the evaluation across all discovered receivers, jamming types, and power levels, use the run_all_experiments.py script. This script automatically updates params.yaml for each configuration and executes dvc repro for you. -# Run the full sweep with default settings +### Run the full sweep with default settings python run_all_experiments.py -# Optional: Run a dry-run to see the experiment matrix without executing +### Optional: Run a dry-run to see the experiment matrix without executing python run_all_experiments.py --dry-run -# Optional: Filter by specific receivers or define custom seeds +#### Optional: Filter by specific receivers or define custom seeds python run_all_experiments.py --receivers Ublox10,GP01 --seeds 42,2024 ## Citation From ac74b566a30828bc6235dbb01b66b2a7a184f64c Mon Sep 17 00:00:00 2001 From: IkaKes <166716424+IkaKes@users.noreply.github.com> Date: Wed, 18 Mar 2026 18:02:10 +0100 Subject: [PATCH 08/11] Refine README formatting and content clarity Updated formatting and corrected minor text inconsistencies in the README. 
--- README.md | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 5cb7205..57d529e 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,12 @@ # JaGuard: Jamming Correction of GNSS Deviation with Deep Temporal Graphs - ## Overview -[cite_start]**JaGuard (Jamming Guardian)** is the first deep temporal graph neural network designed to estimate and correct jamming-induced positional drift in GNSS systems. +**JaGuard (Jamming Guardian)** is the first deep temporal graph neural network designed to estimate and correct jamming-induced positional drift in GNSS systems. -[cite_start]Unlike reactive anomaly detection, JaGuard reformulates mitigation as a **dynamic graph regression problem**. [cite_start]It models the satellite-receiver constellation as a sequence of heterogeneous star graphs, capturing the physical deterioration of the signal over time[cite: 9, 16, 120]. +Unlike reactive anomaly detection, JaGuard reformulates mitigation as a **dynamic graph regression problem**. [cite_start]It models the satellite-receiver constellation as a sequence of heterogeneous star graphs, capturing the physical deterioration of the signal over time. -### Key Features +### Key Features: **Dynamic Star Graph:** Models the receiver as a central node and visible satellites as leaf nodes. **Spatiotemporal Fusion:** Uses a **HeteroGCLSTM** layer to process 10-second windows of signal history. **Minimalist Input:** Operates exclusively on standard NMEA observables (SNR, Azimuth, Elevation; Latitude and Longitude). @@ -17,8 +16,7 @@ ## Project Structure - -. +```text ├── gnss/ # Core library │ ├── train/ # Training logic and LSTM gate definitions │ ├── dataset.py # Graph construction, sliding windows & normalization @@ -55,13 +53,13 @@ pip install -r requirements.txt This project is fully instrumented with Data Version Control (DVC) to ensure reproducibility. 
To simplify the research workflow, we use an automated sweep script that manages parameter updates and triggers the DVC pipeline internally. To automate the evaluation across all discovered receivers, jamming types, and power levels, use the run_all_experiments.py script. This script automatically updates params.yaml for each configuration and executes dvc repro for you. -### Run the full sweep with default settings +### 1. Run the full sweep with default settings python run_all_experiments.py -### Optional: Run a dry-run to see the experiment matrix without executing +### 2. Optional: Run a dry-run to see the experiment matrix without executing python run_all_experiments.py --dry-run -#### Optional: Filter by specific receivers or define custom seeds +### 3. Optional: Filter by specific receivers or define custom seeds python run_all_experiments.py --receivers Ublox10,GP01 --seeds 42,2024 ## Citation From d78f006fdfff4f153b0f46f5d2a2b7df891b1c67 Mon Sep 17 00:00:00 2001 From: IkaKes <166716424+IkaKes@users.noreply.github.com> Date: Wed, 18 Mar 2026 18:06:28 +0100 Subject: [PATCH 09/11] Refactor README.md for clarity and formatting --- README.md | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 57d529e..cb6ef7c 100644 --- a/README.md +++ b/README.md @@ -3,8 +3,7 @@ ## Overview **JaGuard (Jamming Guardian)** is the first deep temporal graph neural network designed to estimate and correct jamming-induced positional drift in GNSS systems. - -Unlike reactive anomaly detection, JaGuard reformulates mitigation as a **dynamic graph regression problem**. 
It models the satellite-receiver constellation as a sequence of heterogeneous star graphs, capturing the physical deterioration of the signal over time. ### Key Features: **Dynamic Star Graph:** Models the receiver as a central node and visible satellites as leaf nodes. @@ -13,7 +12,6 @@ Unlike reactive anomaly detection, JaGuard reformulates mitigation as a **dynami **High Resilience:** Maintains centimeter-level accuracy even under severe -45 dBm jamming and data starvation. - ## Project Structure ```text @@ -24,10 +22,10 @@ Unlike reactive anomaly detection, JaGuard reformulates mitigation as a **dynami ├── params.yaml # Central experiment configuration ├── prepare_data.py # Data preprocessing (NMEA → Z-score normalized graphs) ├── run_experiment.py # Execution for a single configuration/seed -├── run_all_experiments.py # Master script for automated experimental sweeps +├── run_all_experiments.py # Master script for automated experimental sweeps ├── dvc.yaml # DVC pipeline orchestration -└── README.md - +└── README.md +``` ## Installation Make sure you have [Conda](https://docs.conda.io/en/latest/) installed: From 2e7211498627160428ea265010a515dcf7603490 Mon Sep 17 00:00:00 2001 From: IkaKes <166716424+IkaKes@users.noreply.github.com> Date: Wed, 18 Mar 2026 18:09:09 +0100 Subject: [PATCH 10/11] Refine README content for clarity and emphasis --- README.md | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index cb6ef7c..27df549 100644 --- a/README.md +++ b/README.md @@ -2,14 +2,14 @@ ## Overview -**JaGuard (Jamming Guardian)** is the first deep temporal graph neural network designed to estimate and correct jamming-induced positional drift in GNSS systems. +**JaGuard (Jamming Guardian)** is a deep temporal graph neural network designed to estimate and correct jamming-induced positional drift in GNSS systems. JaGuard reformulates mitigation as a **dynamic graph regression problem**. 
It models the satellite-receiver constellation as a sequence of heterogeneous star graphs, capturing the physical deterioration of the signal over time. ### Key Features: -**Dynamic Star Graph:** Models the receiver as a central node and visible satellites as leaf nodes. -**Spatiotemporal Fusion:** Uses a **HeteroGCLSTM** layer to process 10-second windows of signal history. -**Minimalist Input:** Operates exclusively on standard NMEA observables (SNR, Azimuth, Elevation; Latitude and Longitude). -**High Resilience:** Maintains centimeter-level accuracy even under severe -45 dBm jamming and data starvation. +- **Dynamic Star Graph:** Models the receiver as a central node and visible satellites as leaf nodes. +- **Spatiotemporal Fusion:** Uses a **HeteroGCLSTM** layer to process 10-second windows of signal history. +- **Minimalist Input:** Operates exclusively on standard NMEA observables (SNR, Azimuth, Elevation; Latitude and Longitude). +- **High Resilience:** Maintains centimeter-level accuracy even under severe -45 dBm jamming and data starvation. ## Project Structure @@ -50,7 +50,8 @@ pip install -r requirements.txt ## Automated Pipeline This project is fully instrumented with Data Version Control (DVC) to ensure reproducibility. To simplify the research workflow, we use an automated sweep script that manages parameter updates and triggers the DVC pipeline internally. -To automate the evaluation across all discovered receivers, jamming types, and power levels, use the run_all_experiments.py script. This script automatically updates params.yaml for each configuration and executes dvc repro for you. +This script automatically updates params.yaml for each configuration and executes dvc repro for you. + ### 1. 
Run the full sweep with default settings python run_all_experiments.py From 72878a1a2951ab409eec1573e31c69dff7a8ef22 Mon Sep 17 00:00:00 2001 From: IkaKes <166716424+IkaKes@users.noreply.github.com> Date: Wed, 18 Mar 2026 18:11:03 +0100 Subject: [PATCH 11/11] Refine README content --- README.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 27df549..1021f7a 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ ## Overview **JaGuard (Jamming Guardian)** is a deep temporal graph neural network designed to estimate and correct jamming-induced positional drift in GNSS systems. -JaGuard reformulates mitigation as a **dynamic graph regression problem**. It models the satellite-receiver constellation as a sequence of heterogeneous star graphs, capturing the physical deterioration of the signal over time. +JaGuard defines this task as a **dynamic graph regression problem**. It models the satellite-receiver constellation as a sequence of heterogeneous star graphs, capturing the physical deterioration of the signal over time. ### Key Features: - **Dynamic Star Graph:** Models the receiver as a central node and visible satellites as leaf nodes. @@ -16,12 +16,12 @@ JaGuard reformulates mitigation as a **dynamic graph regression problem**. 
It mo ```text ├── gnss/ # Core library -│ ├── train/ # Training logic and LSTM gate definitions -│ ├── dataset.py # Graph construction, sliding windows & normalization -│ └── model.py # JaGuard architecture (HeteroGCLSTM) +│ ├── train/ # Training logic +│ ├── dataset.py # Graph construction & normalization +│ └── model.py # JaGuard architecture ├── params.yaml # Central experiment configuration -├── prepare_data.py # Data preprocessing (NMEA → Z-score normalized graphs) -├── run_experiment.py # Execution for a single configuration/seed +├── prepare_data.py # Data preprocessing +├── run_experiment.py # Execution for a single configuration ├── run_all_experiments.py # Master script for automated experimental sweeps ├── dvc.yaml # DVC pipeline orchestration └── README.md @@ -49,7 +49,7 @@ pip install -r requirements.txt ## Automated Pipeline -This project is fully instrumented with Data Version Control (DVC) to ensure reproducibility. To simplify the research workflow, we use an automated sweep script that manages parameter updates and triggers the DVC pipeline internally. +This project is fully instrumented with Data Version Control (DVC). To simplify the research workflow, we use an automated sweep script that manages parameter updates and triggers the DVC pipeline internally. This script automatically updates params.yaml for each configuration and executes dvc repro for you. ### 1. Run the full sweep with default settings