Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
61 changes: 33 additions & 28 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,18 +1,3 @@
### JupyterNotebooks ###
# gitignore template for Jupyter Notebooks
# website: http://jupyter.org/

.ipynb_checkpoints
*/.ipynb_checkpoints/*

# IPython
profile_default/
ipython_config.py

# Remove previous ipynb_checkpoints
# git rm -r .ipynb_checkpoints/

### Linux ###
*~

# temporary files which can be created if a process still has a handle open of a deleted file
Expand All @@ -27,19 +12,6 @@ ipython_config.py
# .nfs files are created when an open file is removed but is still being accessed
.nfs*

### macOS ###
# General
.DS_Store
.AppleDouble
.LSOverride

# Icon must end with two \r
Icon


# Thumbnails
._*

# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
Expand Down Expand Up @@ -274,3 +246,36 @@ $RECYCLE.BIN/
# Windows shortcuts
*.lnk

# ================================================================
# DVC, data and experiment results (MOST IMPORTANT!)
# ================================================================
/data/
/reports/
/models/
/outputs/
.dvc/cache
/dvclive/
/dvc_plots/
/DvcLiveLogger/

# ================================================================
# Python & Jupyter specific files
# ================================================================
__pycache__/
*.py[cod]
*$py.class
.ipynb_checkpoints/

# ================================================================
# Virtual environment
# ================================================================
.venv/
venv/
env/

# ================================================================
# OS / editor specific files (useful)
# ================================================================
.DS_Store
*~
.vscode/
21 changes: 0 additions & 21 deletions LICENSE

This file was deleted.

69 changes: 68 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
@@ -1 +1,68 @@
# GNSSGraphDetect
# JaGuard: Jamming Correction of GNSS Deviation with Deep Temporal Graphs

## Overview

**JaGuard (Jamming Guardian)** is a deep temporal graph neural network designed to estimate and correct jamming-induced positional drift in GNSS systems.
JaGuard defines this task as a **dynamic graph regression problem**. It models the satellite-receiver constellation as a sequence of heterogeneous star graphs, capturing the physical deterioration of the signal over time.

### Key Features:
- **Dynamic Star Graph:** Models the receiver as a central node and visible satellites as leaf nodes.
- **Spatiotemporal Fusion:** Uses a **HeteroGCLSTM** layer to process 10-second windows of signal history.
- **Minimalist Input:** Operates exclusively on standard NMEA observables (SNR, Azimuth, Elevation; Latitude and Longitude).
- **High Resilience:** Maintains centimeter-level accuracy even under severe -45 dBm jamming and data starvation.


## Project Structure

```text
├── gnss/ # Core library
│ ├── train/ # Training logic
│ ├── dataset.py # Graph construction & normalization
│ └── model.py # JaGuard architecture
├── params.yaml # Central experiment configuration
├── prepare_data.py # Data preprocessing
├── run_experiment.py # Execution for a single configuration
├── run_all_experiments.py # Master script for automated experimental sweeps
├── dvc.yaml # DVC pipeline orchestration
└── README.md
```
## Installation

Make sure you have [Conda](https://docs.conda.io/en/latest/) installed:

### 1. Create environment
conda create --solver classic -n gnss-py310 \
python=3.10 \
numpy=1.24.4 \
scipy=1.15.2 \
pandas=1.3.5 \
scikit-learn \
-c conda-forge -y

### 2. Activate environment
source $(conda info --base)/etc/profile.d/conda.sh
conda activate gnss-py310

### 3. Install remaining dependencies
pip install -r requirements.txt


## Automated Pipeline

This project is fully instrumented with Data Version Control (DVC). To simplify the research workflow, we use an automated sweep script that manages parameter updates and triggers the DVC pipeline internally.
This script automatically updates params.yaml for each configuration and executes dvc repro for you.

### 1. Run the full sweep with default settings
python run_all_experiments.py

### 2. Optional: Run a dry-run to see the experiment matrix without executing
python run_all_experiments.py --dry-run

### 3. Optional: Filter by specific receivers or define custom seeds
python run_all_experiments.py --receivers Ublox10,GP01 --seeds 42,2024

## Citation




24 changes: 24 additions & 0 deletions dvc.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
# DVC pipeline: preprocess the raw data into graph datasets, then train.
stages:
  prepare_data:
    cmd: python prepare_data.py
    deps:
      - prepare_data.py
      - params.yaml
      - gnss/
    outs:
      # ${...} values are interpolated from params.yaml by DVC templating.
      - ${output_dir}
  train:
    cmd: python run_experiment.py
    deps:
      - run_experiment.py
      - gnss/
      - params.yaml
      - ${output_dir}
    outs:
      - ${train.output_dir}/best_model.ckpt
      # cache: false keeps DVCLive logs in the workspace/Git instead of
      # the DVC cache, so metrics and plots stay directly inspectable.
      - ${train.output_dir}/dvclive/metrics.json:
          cache: false
      - ${train.output_dir}/dvclive/plots:
          cache: false
    metrics:
      - ${train.output_dir}/metrics.yaml
Empty file added gnss/__init__.py
Empty file.
24 changes: 24 additions & 0 deletions gnss/config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
from pathlib import Path
from loguru import logger

# Project root: the directory that contains the "gnss" package
# (this file lives at <root>/gnss/config.py).
PROJ_ROOT = Path(__file__).resolve().parents[1]
logger.info(f"PROJ_ROOT path is: {PROJ_ROOT}")

# Canonical data locations used throughout the pipeline.
DATA_DIR = PROJ_ROOT / "data"
RAW_DATA_DIR = DATA_DIR / "raw"
PARSED_DATA_DIR = DATA_DIR / "parsed"
PROCESSED_DATA_DIR = DATA_DIR / "processed"

MIXED_DATA_DIR = DATA_DIR / "mixed"
RANDOM_MIXED_DATA_DIR = DATA_DIR / "mixed_random"

# Output locations for trained models and generated reports/figures.
MODELS_DIR = PROJ_ROOT / "models"
REPORTS_DIR = PROJ_ROOT / "reports"
FIGURES_DIR = REPORTS_DIR / "figures"

# Route loguru output through tqdm.write so log lines do not break
# progress bars; silently skipped when tqdm is not installed.
# NOTE(review): logger.remove(0) assumes the default handler (id 0) is
# still registered when this module is imported — confirm import order.
try:
    from tqdm import tqdm
    logger.remove(0)
    logger.add(lambda msg: tqdm.write(msg, end=""), colorize=True)
except ModuleNotFoundError:
    pass
166 changes: 166 additions & 0 deletions gnss/dataset.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,166 @@
import numpy as np
import pandas as pd
from torch.utils.data import Dataset, DataLoader, ConcatDataset
from sklearn.preprocessing import StandardScaler
from torch_geometric_temporal.signal import DynamicHeteroGraphTemporalSignal


def load_and_process_single_measurement(sats_csv_path, receiver_csv_path):
    """Parse one measurement session into per-timestep graph components.

    Reads the satellite and receiver CSV files and, for every receiver
    T_ID, builds the node feature dicts, regression targets, star-graph
    edge indices and the satellite identifiers of that snapshot.

    Returns a 6-tuple of lists aligned on the sorted unique T_ID values:
    (features, targets, edge_indices, edge_weights [all None],
    time_steps, satellite_id_dicts).
    """
    satellites = pd.read_csv(sats_csv_path)
    receiver = pd.read_csv(receiver_csv_path)
    timestamps = sorted(receiver['T_ID'].unique())

    features, targets, edge_indices, sid_dicts = [], [], [], []

    for t in timestamps:
        # Receiver node: exactly one row per timestep is expected.
        rec_row = receiver[receiver['T_ID'] == t].iloc[0]
        rec_feat = rec_row[['Lat', 'Lon']].to_numpy().reshape(1, 2)
        rec_target = rec_row[['LatDev', 'LonDev']].to_numpy().reshape(1, 2)

        # Satellite nodes, ordered by S_ID for a stable node ordering.
        sat_rows = satellites[satellites['T_ID'] == t].sort_values('S_ID')
        sat_feat = sat_rows[['SNR', 'az', 'el']].to_numpy()
        sat_ids = sat_rows['S_ID'].values.astype(np.int64)
        n_visible = sat_feat.shape[0]

        # Star topology: receiver (node 0) connects to every satellite.
        if n_visible > 0:
            forward = np.vstack([
                np.zeros(n_visible, dtype=int),
                np.arange(n_visible, dtype=int),
            ])
        else:
            forward = np.empty((2, 0), dtype=int)
        reverse = forward[::-1].copy()

        features.append({'receiver': rec_feat, 'satellite': sat_feat})
        targets.append({'receiver': rec_target})
        edge_indices.append({
            ('receiver', 'to', 'satellite'): forward,
            ('satellite', 'rev_to', 'receiver'): reverse,
        })
        sid_dicts.append({'satellite_s_ids': sat_ids})

    # Edge weights are unused downstream; keep one placeholder per step.
    edge_weights = [None] * len(timestamps)
    return features, targets, edge_indices, edge_weights, timestamps, sid_dicts


def load_all_measurements(measurement_files):
    """Load every measurement session described in *measurement_files*.

    Each entry must be a dict with "id", "sats" and "receiver" keys; the
    two CSV paths are passed to load_and_process_single_measurement and
    the resulting per-timestep lists are bundled into one dict per
    session, keyed consistently for the rest of the pipeline.
    """
    processed = []
    part_names = ("features", "targets", "edges",
                  "weights", "time_steps", "satellite_s_ids")
    for entry in measurement_files:
        parts = load_and_process_single_measurement(entry["sats"], entry["receiver"])
        record = dict(zip(part_names, parts))
        record["id"] = entry["id"]
        processed.append(record)
    return processed


def aggregate_for_normalization(measurements_data):
    """Stack receiver features, satellite features and targets across all
    measurements so global normalization statistics can be fitted.

    Snapshots with zero satellites contribute no satellite rows; when a
    category has no rows at all, an empty array with the expected column
    count (receiver: 2, satellite: 3, target: 2) is returned.
    """
    receiver_rows, satellite_rows, target_rows = [], [], []
    for measurement in measurements_data:
        for feat_dict, targ_dict in zip(measurement["features"], measurement["targets"]):
            receiver_rows.append(feat_dict['receiver'])
            target_rows.append(targ_dict['receiver'])
            sat_feats = feat_dict['satellite']
            if sat_feats.size:
                satellite_rows.append(sat_feats)

    def _stack(rows, width):
        # vstack raises on an empty list, so fall back to a 0-row array.
        return np.vstack(rows) if rows else np.empty((0, width))

    return _stack(receiver_rows, 2), _stack(satellite_rows, 3), _stack(target_rows, 2)


def fit_standard_scalers(rec_np, sat_np, targ_np):
    """Fit one StandardScaler per data category.

    Returns the fitted scalers as a (receiver, satellite, target) tuple,
    in the same order as the input arrays.
    """
    fitted = []
    for category_data in (rec_np, sat_np, targ_np):
        fitted.append(StandardScaler().fit(category_data))
    return tuple(fitted)


def normalize_with_scalers(measurement_data_list, rec_scaler, sat_scaler, targ_scaler):
    """Apply fitted scalers to every snapshot of every measurement.

    Returns new measurement dicts (other keys such as "id", "edges" and
    "weights" are carried over unchanged); the satellite id arrays are
    copied so the originals stay untouched. Empty satellite feature
    arrays are copied verbatim because scalers cannot transform
    zero-row input.
    """
    normalized = []
    for measurement in measurement_data_list:
        scaled_features, scaled_targets, copied_sids = [], [], []

        snapshots = zip(
            measurement["features"],
            measurement["targets"],
            measurement["satellite_s_ids"],
        )
        for feat_dict, targ_dict, sid_dict in snapshots:
            sat_feats = feat_dict['satellite']
            scaled_sat = (
                sat_scaler.transform(sat_feats) if sat_feats.size else sat_feats.copy()
            )
            scaled_features.append({
                'receiver': rec_scaler.transform(feat_dict['receiver']),
                'satellite': scaled_sat,
            })
            scaled_targets.append(
                {'receiver': targ_scaler.transform(targ_dict['receiver'])}
            )
            copied_sids.append(
                {'satellite_s_ids': sid_dict['satellite_s_ids'].copy()}
            )

        normalized.append({
            **measurement,
            "features": scaled_features,
            "targets": scaled_targets,
            "satellite_s_ids": copied_sids,
        })

    return normalized


def create_signals(measurements):
    """Wrap each measurement in a DynamicHeteroGraphTemporalSignal.

    Forwards the per-snapshot satellite ids as an additional keyword
    attribute so they stay attached to every snapshot.
    """
    return [
        DynamicHeteroGraphTemporalSignal(
            edge_index_dicts=measurement["edges"],
            edge_weight_dicts=measurement["weights"],
            feature_dicts=measurement["features"],
            target_dicts=measurement["targets"],
            satellite_s_ids=measurement["satellite_s_ids"],
        )
        for measurement in measurements
    ]


class SlidingWindowDataset(Dataset):
    """Fixed-length sliding windows over a temporal graph signal.

    Window *idx* covers snapshots [idx * stride, idx * stride +
    window_size) of the wrapped signal; each item is the list of those
    snapshots in temporal order.
    """

    def __init__(self, signal, window_size, stride=1):
        self.signal = signal
        self.window_size = window_size
        self.stride = stride

    def __len__(self):
        # Number of complete windows; zero when the signal is shorter
        # than a single window.
        slack = self.signal.snapshot_count - self.window_size
        if slack < 0:
            return 0
        return slack // self.stride + 1

    def __getitem__(self, idx):
        first = idx * self.stride
        window = []
        for offset in range(self.window_size):
            window.append(self.signal[first + offset])
        return window


def build_loader(signals, window_size, shuffle, stride=1):
    """Build a DataLoader over sliding windows drawn from *signals*.

    Signals too short to yield even one window are dropped; None is
    returned when nothing remains. With batch_size=1, each "batch" is
    exactly one window (a list of snapshots), which the collate function
    unwraps from the singleton batch list.
    """
    windowed = [
        SlidingWindowDataset(signal, window_size, stride=stride)
        for signal in signals
    ]
    usable = [dataset for dataset in windowed if len(dataset) > 0]
    if not usable:
        return None
    return DataLoader(
        ConcatDataset(usable),
        batch_size=1,
        shuffle=shuffle,
        collate_fn=lambda window_batch: window_batch[0],
    )
Loading