Objects: Lampposts, guardrails, buildings, trees, bushes, land use
Note: Lateral profiles (superelevation, crossfall) are not supported (2D only).
@@ -240,6 +313,31 @@ def get_verbose(self) -> bool:
"""Get verbose output setting (from --verbose flag)."""
return self.verbose
def get_import_filter(self) -> tuple:
    """Return (import_signals, import_parking, import_object_types).

    import_object_types is None when no object-type filter applies —
    either no type checkboxes are shown, or every one of them is checked.
    """
    boxes = self._feature_checkboxes
    if not boxes:
        # No filter UI was built at all: import everything.
        return True, True, None

    def _checked(key) -> bool:
        # Missing checkbox means the feature was never offered → default to import.
        checkbox = boxes.get(key, None)
        return checkbox.isChecked() if checkbox else True

    import_signals = _checked('signals')
    import_parking = _checked('parking')

    # Only ObjectType-keyed entries participate in the object-type filter.
    type_boxes = {key: cb for key, cb in boxes.items() if isinstance(key, ObjectType)}
    if not type_boxes or all(cb.isChecked() for cb in type_boxes.values()):
        selected_types = None  # All checked (or none shown) → no filter
    else:
        selected_types = {key for key, cb in type_boxes.items() if cb.isChecked()}

    return import_signals, import_parking, selected_types
+
def accept(self):
"""Handle accept (validate before closing)."""
file_path = self.get_file_path()
diff --git a/orbit/gui/dialogs/preferences_dialog.py b/orbit/gui/dialogs/preferences_dialog.py
index e27c6f5..7d1625b 100644
--- a/orbit/gui/dialogs/preferences_dialog.py
+++ b/orbit/gui/dialogs/preferences_dialog.py
@@ -6,9 +6,18 @@
"""
from PyQt6.QtCore import Qt
-from PyQt6.QtWidgets import QAbstractItemView, QComboBox, QDoubleSpinBox, QLineEdit, QListWidget, QListWidgetItem
+from PyQt6.QtWidgets import (
+ QAbstractItemView,
+ QCheckBox,
+ QComboBox,
+ QDoubleSpinBox,
+ QLineEdit,
+ QListWidget,
+ QListWidgetItem,
+)
from orbit.models import Project, SignLibraryManager
+from orbit.utils.provenance import DEFAULT_TEMPLATE, is_dataprov_available
from .base_dialog import BaseDialog, InfoIconLabel
@@ -20,6 +29,8 @@ def __init__(self, project: Project, parent=None):
super().__init__("Project Preferences", parent, min_width=500)
self.project = project
+ from PyQt6.QtCore import QSettings
+ self.app_settings = QSettings()
self.setup_ui()
self.load_properties()
@@ -47,11 +58,15 @@ def setup_ui(self):
self.transform_method_combo = QComboBox()
self.transform_method_combo.addItem("Affine (for orthophotos, satellite imagery)", "affine")
self.transform_method_combo.addItem("Homography (for oblique drone imagery)", "homography")
+ self.transform_method_combo.addItem("Drone-assisted (requires drone log)", "drone_assisted")
transform_label = InfoIconLabel(
"Transformation Method:",
"Affine: Best for nadir (straight down) aerial/satellite images. Requires 3+ control points.\n"
- "Homography: Best for tilted camera drone images with perspective. Requires 4+ control points.",
+ "Homography: Best for tilted camera drone images with perspective. Requires 4+ control points.\n"
+ "Drone-assisted: Uses drone flight log (position, altitude, gimbal) for a physically-derived\n"
+ " homography. Requires drone log loaded in the Georeferencing dialog. Works even when\n"
+ " GCPs are nearly collinear (e.g., all along one road).",
bold=False
)
georef_layout.addRow(transform_label, self.transform_method_combo)
@@ -157,6 +172,33 @@ def setup_ui(self):
)
sign_layout.addRow(library_label, self.library_list)
+ # Provenance tracking section
+ prov_layout = self.add_form_group("Data Provenance")
+
+ dataprov_available = is_dataprov_available()
+ hint = "" if dataprov_available else " (install dataprov to enable)"
+
+ self.provenance_checkbox = QCheckBox(f"Create provenance sidecar files{hint}")
+ self.provenance_checkbox.setEnabled(dataprov_available)
+ self.provenance_checkbox.setToolTip(
+ "When enabled, a .prov.json file is written alongside each saved project "
+ "and each exported file, recording the tools and inputs used to create it."
+ )
+ prov_layout.addRow("", self.provenance_checkbox)
+
+ self.provenance_template_edit = QLineEdit()
+ self.provenance_template_edit.setPlaceholderText(DEFAULT_TEMPLATE)
+ self.provenance_template_edit.setEnabled(dataprov_available)
+ template_label = InfoIconLabel(
+ "File name template:",
+ "Template for provenance sidecar file names.\n"
+ "Variables: {dir} parent directory, {stem} filename without extension, "
+ "{ext} extension (with dot), {name} full filename.\n"
+ f"Default: {DEFAULT_TEMPLATE}",
+ bold=False,
+ )
+ prov_layout.addRow(template_label, self.provenance_template_edit)
+
# Create standard OK/Cancel buttons
self.create_button_box()
@@ -168,9 +210,19 @@ def load_properties(self):
# Transformation method
if self.project.transform_method == 'homography':
self.transform_method_combo.setCurrentIndex(1)
+ elif self.project.transform_method == 'drone_assisted':
+ self.transform_method_combo.setCurrentIndex(2)
else:
self.transform_method_combo.setCurrentIndex(0)
+ # Disable drone-assisted if no drone log is loaded
+ model = self.transform_method_combo.model()
+ drone_item = model.item(2)
+ if self.project.drone_metadata is None:
+ from PyQt6.QtGui import QColor
+ drone_item.setEnabled(False)
+ drone_item.setForeground(QColor('gray'))
+
# Traffic side
if self.project.right_hand_traffic:
self.traffic_combo.setCurrentIndex(0)
@@ -195,6 +247,14 @@ def load_properties(self):
if lib_id in enabled_libs:
item.setSelected(True)
+ # Provenance settings (app-level, from QSettings)
+ self.provenance_checkbox.setChecked(
+ self.app_settings.value("provenance/enabled", False, type=bool)
+ )
+ self.provenance_template_edit.setText(
+ self.app_settings.value("provenance/name_template", DEFAULT_TEMPLATE, type=str)
+ )
+
def accept(self):
"""Save preferences and close dialog."""
# Save map name
@@ -227,4 +287,9 @@ def accept(self):
enabled_libs = ['se']
self.project.enabled_sign_libraries = enabled_libs
+ # Save provenance settings to app QSettings
+ self.app_settings.setValue("provenance/enabled", self.provenance_checkbox.isChecked())
+ template = self.provenance_template_edit.text().strip() or DEFAULT_TEMPLATE
+ self.app_settings.setValue("provenance/name_template", template)
+
super().accept()
diff --git a/orbit/gui/graphics/object_graphics.py b/orbit/gui/graphics/object_graphics.py
index 0989382..5892bd0 100644
--- a/orbit/gui/graphics/object_graphics.py
+++ b/orbit/gui/graphics/object_graphics.py
@@ -18,7 +18,7 @@
ObjectType.TREE_CONIFER: QColor(34, 139, 34, 77), # Forest green
ObjectType.BUSH: QColor(34, 139, 34, 77), # Forest green
ObjectType.GUARDRAIL: QColor(25, 25, 112, 77), # Dark blue
- ObjectType.LAMPPOST: QColor(255, 255, 255, 77), # White
+ ObjectType.LAMPPOST: QColor(255, 200, 0, 220), # Amber/yellow
# Land use areas
ObjectType.LANDUSE_FOREST: QColor(0, 100, 0, 77), # Dark green
ObjectType.LANDUSE_FARMLAND: QColor(210, 180, 100, 77), # Wheat/tan
@@ -47,11 +47,11 @@ def create_lamppost_path(scale: float = 1.0) -> QPainterPath:
path = QPainterPath()
# Small circle for pole base
- radius = 3.0 * scale
+ radius = 5.0 * scale
path.addEllipse(-radius, -radius, radius * 2, radius * 2)
# Orientation line (pointing direction)
- line_length = 10.0 * scale
+ line_length = 14.0 * scale
path.moveTo(0, 0)
path.lineTo(line_length, 0)
diff --git a/orbit/gui/graphics/object_graphics_item.py b/orbit/gui/graphics/object_graphics_item.py
index 4a91e23..73106cb 100644
--- a/orbit/gui/graphics/object_graphics_item.py
+++ b/orbit/gui/graphics/object_graphics_item.py
@@ -372,13 +372,11 @@ def get_segment_at(self, scene_pos: QPointF, tolerance: float = 8.0) -> int:
return -1
def set_selected(self, selected: bool):
- """
- Set selection state of the object.
-
- Args:
- selected: True to select, False to deselect
- """
+ """Set selection state of the object."""
self._is_selected = selected
- self.selection_item.setVisible(selected)
+ try:
+ self.selection_item.setVisible(selected)
+ except RuntimeError:
+ return
if self._is_polygon_with_points():
self.update_graphics() # Refresh vertex handles
diff --git a/orbit/gui/image_view.py b/orbit/gui/image_view.py
index e36920b..4e0fd70 100644
--- a/orbit/gui/image_view.py
+++ b/orbit/gui/image_view.py
@@ -14,12 +14,15 @@
from PyQt6.QtWidgets import (
QGraphicsEllipseItem,
QGraphicsItem,
+ QGraphicsItemGroup,
QGraphicsLineItem,
QGraphicsPathItem,
QGraphicsPixmapItem,
QGraphicsRectItem,
QGraphicsScene,
+ QGraphicsTextItem,
QGraphicsView,
+ QInputDialog,
QMenu,
QMessageBox,
)
@@ -41,6 +44,75 @@
from .utils.message_helpers import ask_yes_no, show_warning
class ControlPointItem(QGraphicsItemGroup):
    """Draggable crosshair marker for a georeferencing control point.

    The group is positioned at the control point's pixel coordinates, so all
    child items are drawn in local coordinates around (0, 0). Dragging the
    group writes the new position back into the wrapped control-point model
    object and invokes ``moved_callback`` if one has been assigned.
    """

    # Crosshair geometry (scene units): arm length from center, and the radius
    # of the central gap that leaves the exact target pixel visible.
    ARM_LENGTH = 10
    GAP = 3

    def __init__(self, control_point, parent=None):
        """Build the crosshair: 4 arms, a center dot, and an optional name label.

        Args:
            control_point: Model object providing pixel_x, pixel_y and name;
                its pixel coordinates are mutated in place when dragged.
            parent: Optional parent graphics item.
        """
        super().__init__(parent)
        self.control_point = control_point
        self.moved_callback = None  # Set to callable(ControlPoint) after creation

        # Movable + geometry-change notifications so itemChange fires on drag.
        self.setFlag(QGraphicsItemGroup.GraphicsItemFlag.ItemIsMovable, True)
        self.setFlag(QGraphicsItemGroup.GraphicsItemFlag.ItemSendsGeometryChanges, True)
        self.setFlag(QGraphicsItemGroup.GraphicsItemFlag.ItemIsSelectable, True)
        self.setAcceptHoverEvents(True)

        pen = QPen(QColor(0, 100, 255), 2)
        dot_pen = QPen(QColor(0, 100, 255), 1)
        dot_brush = QBrush(QColor(0, 100, 255))
        a, g = self.ARM_LENGTH, self.GAP

        # Four arms drawn in local coords, leaving a gap around the origin.
        for item in [
            QGraphicsLineItem(-a, 0, -g, 0),
            QGraphicsLineItem(g, 0, a, 0),
            QGraphicsLineItem(0, -a, 0, -g),
            QGraphicsLineItem(0, g, 0, a),
        ]:
            item.setPen(pen)
            item.setZValue(10)
            self.addToGroup(item)

        # Tiny 1x1 dot marking the exact target pixel at the group origin.
        dot = QGraphicsEllipseItem(-0.5, -0.5, 1, 1)
        dot.setPen(dot_pen)
        dot.setBrush(dot_brush)
        dot.setZValue(10)
        self.addToGroup(dot)

        # Optional bold name label, offset to the upper-right of the crosshair.
        if control_point.name:
            font = QFont()
            font.setBold(True)
            font.setPointSize(10)
            label = QGraphicsTextItem(control_point.name)
            label.setDefaultTextColor(QColor(0, 100, 255))
            label.setFont(font)
            label.setPos(15, -10)
            label.setZValue(11)
            self.addToGroup(label)

        # Place the whole group at the control point's pixel position.
        self.setPos(control_point.pixel_x, control_point.pixel_y)
        self.setZValue(10)

    def itemChange(self, change, value):
        """Sync the model's pixel coordinates after every drag move."""
        if change == QGraphicsItemGroup.GraphicsItemChange.ItemPositionHasChanged:
            pos = self.pos()
            self.control_point.pixel_x = pos.x()
            self.control_point.pixel_y = pos.y()
            # Notify the owner (e.g. to re-emit a Qt signal) if a hook is set.
            if self.moved_callback:
                self.moved_callback(self.control_point)
        return super().itemChange(change, value)

    def hoverEnterEvent(self, event):
        """Show a move cursor so the marker reads as draggable."""
        self.setCursor(Qt.CursorShape.SizeAllCursor)
        super().hoverEnterEvent(event)

    def hoverLeaveEvent(self, event):
        """Restore the default cursor on hover exit."""
        self.unsetCursor()
        super().hoverLeaveEvent(event)
+
+
class ImageView(QGraphicsView):
"""Interactive image view with polyline drawing and editing."""
@@ -90,6 +162,7 @@ class ImageView(QGraphicsView):
# dragged_road_id, target_road_id, dragged_contact, target_contact
road_link_requested = pyqtSignal(str, str, str, str)
road_unlink_requested = pyqtSignal(str, str) # road_id, linked_road_id (for disconnect)
+ control_point_moved = pyqtSignal(object) # Emits ControlPoint when dragged
def __init__(self, parent=None, verbose: bool = False):
super().__init__(parent)
@@ -282,9 +355,9 @@ def set_synthetic_canvas(self, width: int, height: int, color=None):
pixmap = QPixmap(width, height)
pixmap.fill(color)
- # Clear scene and add synthetic pixmap
+ # Clear scene and all item tracking dicts
self.scene.clear()
- self.polyline_items.clear()
+ self._clear_item_dicts()
self.image_item = self.scene.addPixmap(pixmap)
self.image_item.setZValue(0)
self.image_np = None
@@ -317,9 +390,9 @@ def load_image(self, image_path: Path):
)
pixmap = QPixmap.fromImage(q_image)
- # Clear scene and add image
+ # Clear scene and all item tracking dicts
self.scene.clear()
- self.polyline_items.clear()
+ self._clear_item_dicts()
self.image_item = self.scene.addPixmap(pixmap)
self.image_item.setZValue(0)
@@ -1031,49 +1104,11 @@ def update_object_scale_factors(self, scale_factor: float):
item.update_scale_factor(scale_factor)
def add_control_point_graphics(self, control_point):
- """Add a control point marker to the graphics scene as a crosshair."""
-
- x, y = control_point.pixel_x, control_point.pixel_y
-
- # Crosshair parameters
- arm_length = 10 # Length of each arm from center
- gap = 3 # Gap radius at center (so target pixel is visible)
-
- # Main crosshair pen (bright blue)
- pen = QPen(QColor(0, 100, 255), 2)
-
- # Horizontal arms (left and right of center gap)
- left_arm = self.scene.addLine(x - arm_length, y, x - gap, y, pen)
- right_arm = self.scene.addLine(x + gap, y, x + arm_length, y, pen)
-
- # Vertical arms (top and bottom of center gap)
- top_arm = self.scene.addLine(x, y - arm_length, x, y - gap, pen)
- bottom_arm = self.scene.addLine(x, y + gap, x, y + arm_length, pen)
-
- # Tiny center dot for exact position reference
- dot_pen = QPen(QColor(0, 100, 255), 1)
- dot_brush = QBrush(QColor(0, 100, 255))
- center_dot = self.scene.addEllipse(x - 0.5, y - 0.5, 1, 1, dot_pen, dot_brush)
-
- # Set z-values and add to tracking list
- for item in [left_arm, right_arm, top_arm, bottom_arm, center_dot]:
- item.setZValue(10)
- self.control_point_items.append(item)
-
- # Add label with CP name
- if control_point.name:
- from PyQt6.QtGui import QFont
- from PyQt6.QtWidgets import QGraphicsTextItem
- text_item = QGraphicsTextItem(control_point.name)
- text_item.setDefaultTextColor(QColor(0, 100, 255)) # Bright blue
- font = QFont()
- font.setBold(True)
- font.setPointSize(10)
- text_item.setFont(font)
- text_item.setPos(x + 15, y - 10)
- text_item.setZValue(11)
- self.scene.addItem(text_item)
- self.control_point_items.append(text_item)
+ """Add a draggable control point marker to the graphics scene."""
+ item = ControlPointItem(control_point)
+ item.moved_callback = lambda cp: self.control_point_moved.emit(cp)
+ self.scene.addItem(item)
+ self.control_point_items.append(item)
def add_road_lanes_graphics(self, road: Road, scale_factors: tuple = None):
"""
@@ -1651,6 +1686,20 @@ def update_polyline(self, polyline_id: str):
if self.soffsets_visible:
self._update_soffset_labels(polyline_id)
+ def _clear_item_dicts(self):
+ """Clear all item tracking dicts after scene.clear() has been called."""
+ self.polyline_items.clear()
+ self.junction_items.clear()
+ self.signal_items.clear()
+ self.object_items.clear()
+ self.parking_items.clear()
+ self.control_point_items.clear()
+ self.road_lanes_items.clear()
+ self.connecting_road_centerline_items.clear()
+ self.connecting_road_lanes_items.clear()
+ self.soffset_labels.clear()
+ self.junction_debug_items.clear()
+
def safe_remove_item(self, item: QGraphicsItem) -> bool:
"""
Safely remove a graphics item from the scene.
@@ -1686,11 +1735,7 @@ def safe_remove_items(self, items: List[QGraphicsItem]):
def clear(self):
"""Clear the view."""
self.scene.clear()
- self.polyline_items.clear()
- self.junction_items.clear()
- self.control_point_items.clear()
- self.road_lanes_items.clear()
- self.soffset_labels.clear() # Clear s-offset labels
+ self._clear_item_dicts()
self.project = None
self.image_item = None
self.image_np = None
@@ -2445,6 +2490,18 @@ def select_object(self, object_id: str):
x, y = obj.position
self.centerOn(x, y)
+ def _adjacent_section_for_cr(self, cr, adjacent) -> int | None:
+ """Section number of `adjacent` road that touches `cr`, honoring contact point."""
+ if not adjacent.lane_sections:
+ return None
+ if cr.predecessor_id == adjacent.id:
+ section_idx = 0 if cr.predecessor_contact == "start" else -1
+ elif cr.successor_id == adjacent.id:
+ section_idx = 0 if cr.successor_contact == "start" else -1
+ else:
+ section_idx = 0
+ return adjacent.lane_sections[section_idx].section_number
+
def _get_connecting_road_lane_id(self, junction, connecting_road_id: str, source_lane_id: int) -> int | None:
"""
Determine which lane on a connecting road corresponds to a source lane.
@@ -2536,50 +2593,67 @@ def find_connected_lanes(self, road_id: str, section_number: int, lane_id: int)
if road.successor_id and road.successor_id in connected_ids:
skip_successor = True
- # 1. Check direct road predecessor/successor links (not through junctions)
- # Skip if both roads are in the same junction - junction connections take precedence
- if is_first_section and road.predecessor_id and not skip_predecessor:
- pred_road = self.project.get_road(road.predecessor_id)
- if pred_road and pred_road.lane_sections:
- # Predecessor connects at its last section
- pred_section = pred_road.lane_sections[-1].section_number
- # Assume same lane exists in connected road (common case for continuous roads)
- result['road_lanes'].append((road.predecessor_id, pred_section, lane_id))
-
- if is_last_section and road.successor_id and not skip_successor:
- succ_road = self.project.get_road(road.successor_id)
- if succ_road and succ_road.lane_sections:
- # Successor connects at its first section
- succ_section = succ_road.lane_sections[0].section_number
- # Assume same lane exists in connected road (common case for continuous roads)
- result['road_lanes'].append((road.successor_id, succ_section, lane_id))
+ # 1. Check direct road predecessor/successor links (not through junctions).
+ # Skip if both roads are in the same junction - junction connections take precedence.
+ # Also skip for connecting roads: their neighbor lane mapping requires sign flips at
+ # opposite-direction contacts, which are fully described by junction.lane_connections below.
+ if not road.is_connecting_road:
+ if is_first_section and road.predecessor_id and not skip_predecessor:
+ pred_road = self.project.get_road(road.predecessor_id)
+ if pred_road and pred_road.lane_sections:
+ pred_section = pred_road.lane_sections[-1].section_number
+ result['road_lanes'].append((road.predecessor_id, pred_section, lane_id))
+
+ if is_last_section and road.successor_id and not skip_successor:
+ succ_road = self.project.get_road(road.successor_id)
+ if succ_road and succ_road.lane_sections:
+ succ_section = succ_road.lane_sections[0].section_number
+ result['road_lanes'].append((road.successor_id, succ_section, lane_id))
# 2. Search all junctions for lane connections involving this lane
for junction in self.project.junctions:
for lane_conn in junction.lane_connections:
- # Check if this lane is the source (find successor via junction)
- if lane_conn.from_road_id == road_id and lane_conn.from_lane_id == lane_id:
- # Only consider if this is the last section (connects to junction)
- if is_last_section:
- # Only add connecting road lane - don't show destination road beyond junction
- if lane_conn.connecting_road_id:
- conn_lane_id = self._get_connecting_road_lane_id(
- junction, lane_conn.connecting_road_id, lane_id
+ # Case A: this road is the incoming source (find CR lane at junction).
+ if (lane_conn.from_road_id == road_id and lane_conn.from_lane_id == lane_id
+ and is_last_section and lane_conn.connecting_road_id):
+ conn_lane_id = lane_conn.connecting_lane_id
+ if conn_lane_id is None:
+ conn_lane_id = self._get_connecting_road_lane_id(
+ junction, lane_conn.connecting_road_id, lane_id
+ )
+ if conn_lane_id is not None:
+ result['connecting_road_lanes'].append((lane_conn.connecting_road_id, conn_lane_id))
+
+ # Case B: this road is the outgoing destination (find CR lane at junction).
+ if (lane_conn.to_road_id == road_id and lane_conn.to_lane_id == lane_id
+ and is_first_section and lane_conn.connecting_road_id):
+ conn_lane_id = lane_conn.connecting_lane_id
+ if conn_lane_id is None:
+ conn_lane_id = self._get_connecting_road_lane_id(
+ junction, lane_conn.connecting_road_id, lane_conn.from_lane_id
+ )
+ if conn_lane_id is not None:
+ result['connecting_road_lanes'].append((lane_conn.connecting_road_id, conn_lane_id))
+
+ # Case C: this road IS the connecting road — highlight incoming/outgoing road lanes
+ # using the stored from/to lane IDs (which already encode direction-flip semantics).
+ if (lane_conn.connecting_road_id == road_id
+ and lane_conn.connecting_lane_id is not None
+ and lane_conn.connecting_lane_id == lane_id):
+ from_road = self.project.get_road(lane_conn.from_road_id)
+ if from_road and from_road.lane_sections:
+ from_section = self._adjacent_section_for_cr(road, from_road)
+ if from_section is not None:
+ result['road_lanes'].append(
+ (lane_conn.from_road_id, from_section, lane_conn.from_lane_id)
)
- if conn_lane_id is not None:
- result['connecting_road_lanes'].append((lane_conn.connecting_road_id, conn_lane_id))
-
- # Check if this lane is the destination (find predecessor via junction)
- if lane_conn.to_road_id == road_id and lane_conn.to_lane_id == lane_id:
- # Only consider if this is the first section (connects from junction)
- if is_first_section:
- # Only add connecting road lane - don't show source road beyond junction
- if lane_conn.connecting_road_id:
- conn_lane_id = self._get_connecting_road_lane_id(
- junction, lane_conn.connecting_road_id, lane_conn.from_lane_id
+ to_road = self.project.get_road(lane_conn.to_road_id)
+ if to_road and to_road.lane_sections:
+ to_section = self._adjacent_section_for_cr(road, to_road)
+ if to_section is not None:
+ result['road_lanes'].append(
+ (lane_conn.to_road_id, to_section, lane_conn.to_lane_id)
)
- if conn_lane_id is not None:
- result['connecting_road_lanes'].append((lane_conn.connecting_road_id, conn_lane_id))
return result
@@ -2873,18 +2947,24 @@ def select_connecting_road_lane(self, connecting_road_id: str, lane_id: int):
if not conn_road or not conn_road.is_connecting_road:
continue
- # Check if this lane connection corresponds to the selected lane
- expected_lane = self._get_connecting_road_lane_id(
- junction, connecting_road_id, lane_conn.from_lane_id
- )
- if expected_lane != lane_id:
- continue
+ # Check if this lane connection corresponds to the selected lane.
+ # Prefer the stored connecting_lane_id (already encodes direction flips);
+ # fall back to the ordinal helper only for legacy data without it.
+ if lane_conn.connecting_lane_id is not None:
+ if lane_conn.connecting_lane_id != lane_id:
+ continue
+ else:
+ expected_lane = self._get_connecting_road_lane_id(
+ junction, connecting_road_id, lane_conn.from_lane_id
+ )
+ if expected_lane != lane_id:
+ continue
- # Highlight the from_road lane (last section)
+ # Highlight the from_road lane at the section that touches the CR
from_road = self.project.get_road(lane_conn.from_road_id)
if from_road and from_road.lane_sections:
- from_section = from_road.lane_sections[-1].section_number
- if lane_conn.from_road_id in self.road_lanes_items:
+ from_section = self._adjacent_section_for_cr(conn_road, from_road)
+ if from_section is not None and lane_conn.from_road_id in self.road_lanes_items:
lanes_item = self.road_lanes_items[lane_conn.from_road_id]
for lane_polygon in lanes_item.lane_items:
if (isinstance(lane_polygon, InteractiveLanePolygon) and
@@ -2894,11 +2974,11 @@ def select_connecting_road_lane(self, connecting_road_id: str, lane_id: int):
lane_polygon.set_linked(True)
self.linked_lane_polygons.append(lane_polygon)
- # Highlight the to_road lane (first section)
+ # Highlight the to_road lane at the section that touches the CR
to_road = self.project.get_road(lane_conn.to_road_id)
if to_road and to_road.lane_sections:
- to_section = to_road.lane_sections[0].section_number
- if lane_conn.to_road_id in self.road_lanes_items:
+ to_section = self._adjacent_section_for_cr(conn_road, to_road)
+ if to_section is not None and lane_conn.to_road_id in self.road_lanes_items:
lanes_item = self.road_lanes_items[lane_conn.to_road_id]
for lane_polygon in lanes_item.lane_items:
if (isinstance(lane_polygon, InteractiveLanePolygon) and
@@ -3304,6 +3384,14 @@ def _show_centerline_point_menu(self, view_pos, polyline_id: str, point_index: i
disconnect_action = menu.addAction(f"Disconnect from '{linked_name}'")
linked_road_id = road.successor_id
+ # Smooth Curve option — always shown for road centerlines
+ menu.addSeparator()
+ smooth_action = menu.addAction("Smooth Road Curve")
+ smooth_action.setToolTip(
+ "Redistribute polyline points along a smooth Bezier curve, "
+ "keeping start/end positions and tangent directions."
+ )
+
# Show menu and get selected action
action = menu.exec(self.mapToGlobal(view_pos))
@@ -3311,6 +3399,13 @@ def _show_centerline_point_menu(self, view_pos, polyline_id: str, point_index: i
self._delete_point(polyline_id, point_index)
elif disconnect_action and action == disconnect_action and road and linked_road_id:
self.road_unlink_requested.emit(road.id, linked_road_id)
+ elif action == smooth_action:
+ n_pts, ok = QInputDialog.getInt(
+ self, "Smooth Road Curve", "Number of output points:",
+ value=50, min=5, max=500, step=5,
+ )
+ if ok:
+ self._smooth_road_polyline(polyline_id, n_pts)
elif action == split_section_action and road:
# Warn if creating a small section
s_coords = road.calculate_centerline_s_coordinates(polyline.points)
@@ -3340,6 +3435,143 @@ def _show_centerline_point_menu(self, view_pos, polyline_id: str, point_index: i
# Emit signal for MainWindow to handle road splitting
self.road_split_requested.emit(road.id, polyline_id, point_index)
def _smooth_road_polyline(self, polyline_id: str, num_points: int = 50) -> None:
    """Smooth a regular road's centerline using adjacent road tangents.

    Replaces the polyline's points with a Bezier fit that preserves the
    start/end positions, then pushes a ModifyPolylineCommand so the edit
    is undoable. Silently returns when the polyline is unknown or has
    fewer than 3 points.

    Args:
        polyline_id: ID of the road centerline polyline to smooth.
        num_points: Requested number of output points (clamped to >= 2).
    """
    import math as _math

    from orbit.gui.project_controller import get_contact_pos_heading
    from orbit.gui.undo_commands import ModifyPolylineCommand
    from orbit.utils.geometry import fit_smooth_curve_to_polyline

    item = self.polyline_items.get(polyline_id)
    if not item:
        return
    pts = list(item.polyline.points)
    if len(pts) < 3:
        # Nothing meaningful to smooth with fewer than 3 points.
        return

    # Default tangents from first/last segment
    start_hdg = _math.atan2(pts[1][1] - pts[0][1], pts[1][0] - pts[0][0])
    end_hdg = _math.atan2(pts[-1][1] - pts[-2][1], pts[-1][0] - pts[-2][0])

    # Override with adjacent road tangents when available (more accurate)
    road = self._find_road_by_centerline(polyline_id)
    if road and self.project:
        if road.predecessor_id:
            pred_road = self.project.get_road(road.predecessor_id)
            if pred_road:
                pred_pl = self.project.get_polyline(pred_road.centerline_id)
                if pred_pl:
                    _, h = get_contact_pos_heading(pred_pl, road.predecessor_contact)
                    if road.predecessor_contact == "start":
                        # Flip 180° so the tangent points into this road
                        # (assumes heading follows the neighbor's own direction
                        # of travel — TODO confirm orientation convention).
                        h += _math.pi
                    start_hdg = h
        if road.successor_id:
            succ_road = self.project.get_road(road.successor_id)
            if succ_road:
                succ_pl = self.project.get_polyline(succ_road.centerline_id)
                if succ_pl:
                    _, h = get_contact_pos_heading(succ_pl, road.successor_contact)
                    if road.successor_contact == "end":
                        # Mirror of the predecessor flip at the far end.
                        h += _math.pi
                    end_hdg = h

    # Clamp to a sane minimum; the curve fit needs at least 2 output points.
    n_out = max(num_points, 2)
    new_pts = fit_smooth_curve_to_polyline(pts, start_hdg, end_hdg, num_output_points=n_out)

    # Snapshot geo points for undo before they are invalidated below.
    old_geo = list(item.polyline.geo_points) if item.polyline.geo_points else None

    # Apply first (convention: caller applies, then pushes undo command)
    item.polyline.points = new_pts
    item.polyline.geo_points = None  # pixel coords are now primary
    item.update_graphics()

    cmd = ModifyPolylineCommand(
        self.parent(),
        polyline_id,
        old_points=pts,
        new_points=new_pts,
        old_geo_points=old_geo,
        new_geo_points=None,
        description="Smooth Road Curve",
    )
    # Only push when the parent widget actually owns an undo stack
    # (presumably the MainWindow — verify against callers).
    if self.parent() and hasattr(self.parent(), 'undo_stack'):
        self.parent().undo_stack.push(cmd)
+
def _show_cr_centerline_menu(self, view_pos, conn_road_id: str, point_index: int) -> None:
    """Context menu for a connecting road centerline (right-click on point or segment).

    Offers "Delete Point" (polyline-geometry CRs only, when a point was hit)
    and "Smooth Curve" (all geometry types). Smoothing replaces inline_path
    with a Bezier fit and pushes a SmoothCRCommand for undo.

    Args:
        view_pos: Click position in view (widget) coordinates.
        conn_road_id: ID of the connecting road whose centerline was clicked.
        point_index: Index of the clicked point, or -1 if a segment was hit.
    """
    from orbit.gui.undo_commands import SmoothCRCommand
    from orbit.utils.geometry import fit_smooth_curve_to_polyline, get_smooth_cr_tangents

    item = self.connecting_road_centerline_items.get(conn_road_id)
    if not item:
        return
    cr = item.connecting_road

    menu = QMenu()

    # "Delete Point" only for polyline CRs with a clicked point
    # (and only while more than 2 points remain, so the path stays valid).
    delete_action = None
    if cr.geometry_type == "polyline" and point_index >= 0 and len(cr.inline_path) > 2:
        delete_action = menu.addAction("Delete Point")
        menu.addSeparator()

    smooth_action = menu.addAction("Smooth Curve")
    smooth_action.setToolTip(
        "Redistribute the curve's points along a smooth Bezier using "
        "adjacent road tangents. Works for all geometry types."
    )

    # Blocks until the user picks an action or dismisses the menu (None).
    action = menu.exec(self.mapToGlobal(view_pos))

    if delete_action and action == delete_action:
        cr.inline_path.pop(point_index)
        if cr.inline_geo_path is not None:
            transformer = self._get_geo_transformer()
            if transformer:
                # Rebuild the whole geo path from pixels for consistency.
                cr.inline_geo_path = [
                    transformer.pixel_to_geo(x, y) for x, y in cr.inline_path
                ]
            elif 0 <= point_index < len(cr.inline_geo_path):
                # No transformer available: mirror the pop by index instead.
                cr.inline_geo_path.pop(point_index)
        item.update_graphics()
        # Lane graphics derive from the centerline — refresh them too.
        if conn_road_id in self.connecting_road_lanes_items:
            self.connecting_road_lanes_items[conn_road_id].update_graphics()

    elif action == smooth_action:
        path = cr.inline_path
        if not path or len(path) < 2:
            return
        # Prefer stored headings (set by _regenerate_parampoly3_cr, authoritative)
        if cr.stored_start_heading is not None and cr.stored_end_heading is not None:
            start_hdg, end_hdg = cr.stored_start_heading, cr.stored_end_heading
        else:
            tangents = get_smooth_cr_tangents(cr, self.project)
            if tangents is None:
                # Cannot derive tangents → abort silently.
                return
            start_hdg, end_hdg = tangents

        n_pts, ok = QInputDialog.getInt(
            self, "Smooth Curve", "Number of output points:",
            value=50, min=5, max=500, step=5,
        )
        if not ok:
            # User cancelled the dialog.
            return

        old_path = list(path)
        n_out = max(n_pts, 2)  # Curve fit needs at least 2 output points
        new_path = fit_smooth_curve_to_polyline(path, start_hdg, end_hdg, num_output_points=n_out)

        # Apply first, then push for undo
        cr.inline_path = new_path
        self.update_connecting_road_graphics(conn_road_id)

        # Push onto the parent's undo stack when one exists
        # (presumably the MainWindow — verify against callers).
        mw = self.parent()
        if mw and hasattr(mw, 'undo_stack'):
            cmd = SmoothCRCommand(self, cr, old_path, new_path)
            mw.undo_stack.push(cmd)
+
def _show_boundary_point_menu(self, view_pos, polyline_id: str, point_index: int):
"""
Show context menu for boundary polyline point with Delete option.
@@ -4092,26 +4324,13 @@ def _handle_right_click_context_menu(self, scene_pos, event: QMouseEvent):
self._show_boundary_point_menu(event.pos(), polyline_id, point_index)
return
- # Connecting road point deletion (polyline geometry only)
+ # Connecting road right-click: point or segment
for conn_road_id, item in self.connecting_road_centerline_items.items():
- if item.connecting_road.geometry_type != "polyline":
- continue
- point_index = item.get_point_at(scene_pos)
- if point_index >= 0:
- connecting_road = item.connecting_road
- if len(connecting_road.inline_path) > 2:
- connecting_road.inline_path.pop(point_index)
- if connecting_road.inline_geo_path is not None:
- transformer = self._get_geo_transformer()
- if transformer:
- connecting_road.inline_geo_path = [
- transformer.pixel_to_geo(x, y) for x, y in connecting_road.inline_path
- ]
- elif 0 <= point_index < len(connecting_road.inline_geo_path):
- connecting_road.inline_geo_path.pop(point_index)
- item.update_graphics()
- if conn_road_id in self.connecting_road_lanes_items:
- self.connecting_road_lanes_items[conn_road_id].update_graphics()
+ cr = item.connecting_road
+ point_index = item.get_point_at(scene_pos) if cr.geometry_type == "polyline" else -1
+ segment_index = item.get_segment_at(scene_pos) if point_index < 0 else -1
+ if point_index >= 0 or segment_index >= 0:
+ self._show_cr_centerline_menu(event.pos(), conn_road_id, point_index)
return
def mouseMoveEvent(self, event: QMouseEvent):
diff --git a/orbit/gui/main_window.py b/orbit/gui/main_window.py
index be3e1c6..9c367c2 100644
--- a/orbit/gui/main_window.py
+++ b/orbit/gui/main_window.py
@@ -24,6 +24,7 @@
from orbit.models import Project
from orbit.utils.coordinate_transform import TransformAdjustment
from orbit.utils.logging_config import get_logger
+from orbit.utils.provenance import is_dataprov_available, record_export, record_project_save
from .image_view import ImageView
from .project_controller import ProjectController
@@ -60,6 +61,10 @@ def __init__(self, image_path: Optional[Path] = None, verbose: bool = False,
self._original_transformer = None # Saved original transformer
self._aerial_transformer = None # Transformer for aerial tile image
self._aerial_zoom = 18 # Default tile zoom level
+ self._original_cp_pixels: list = [] # Saved CP pixel positions for round-trip restore
+ self._original_cr_paths: dict = {} # Saved connecting road inline_paths for round-trip restore
+ self._original_junction_centers: dict = {} # Saved junction center_point pixel coords for round-trip restore
+ self._original_view_adjustment = None # Saved current_adjustment before aerial switch
# Adjustment ghost overlay (shows unadjusted geometry positions)
self._adjustment_ghost_overlay = None
@@ -635,6 +640,46 @@ def _remember_directory(self, file_path: str) -> None:
if file_path:
self._last_file_directory = str(Path(file_path).parent)
+ def _provenance_setting_enabled(self) -> bool:
+ """Return True if provenance tracking is requested via settings."""
+ return self.settings.value("provenance/enabled", False, type=bool)
+
+ def _provenance_enabled(self) -> bool:
+ """Return True if provenance tracking is enabled and dataprov is available."""
+ return is_dataprov_available() and self._provenance_setting_enabled()
+
+ def _check_provenance_ready(self) -> bool:
+ """Return False (and show an error) if provenance is enabled but dataprov is missing."""
+ if self._provenance_setting_enabled() and not is_dataprov_available():
+ show_error(
+ self,
+ "Provenance tracking is enabled in Preferences, but the dataprov "
+ "package is not installed.\n\n"
+ "Install dataprov or disable provenance tracking in Preferences.",
+ "Provenance Unavailable",
+ )
+ return False
+ return True
+
+ def _provenance_template(self) -> str:
+ from orbit.utils.provenance import DEFAULT_TEMPLATE
+ return self.settings.value("provenance/name_template", DEFAULT_TEMPLATE, type=str)
+
+ def _record_project_provenance(self, orbit_path: Path, start_time) -> None:
+ """Record a provenance step for a project save, if enabled."""
+ if not self._provenance_enabled():
+ return
+ record_project_save(self.project, orbit_path, start_time, self._provenance_template())
+
+ def _record_export_provenance(self, output_path: Path, operation: str, output_format: str, start_time) -> None:
+ """Record a provenance step for an export, if enabled."""
+ if not self._provenance_enabled():
+ return
+ record_export(
+ output_path, self.current_project_file, operation, output_format,
+ start_time, self._provenance_template(),
+ )
+
def open_project(self):
"""Open an existing project file."""
if not self.check_unsaved_changes():
@@ -721,17 +766,20 @@ def _ensure_original_view_for_save(self):
def save_project(self) -> bool:
"""Save the current project. Returns False if the user cancels."""
+ from datetime import datetime, timezone
self._ensure_original_view_for_save()
if not self._prompt_and_handle_unapplied_adjustment():
return False
self._sync_adjustment_to_project()
if self.current_project_file:
try:
+ start_time = datetime.now(timezone.utc)
self.project.save(self.current_project_file)
self.undo_stack.setClean()
self.modified = False
self.update_window_title()
self.statusBar().showMessage(f"Project saved: {self.current_project_file}")
+ self._record_project_provenance(self.current_project_file, start_time)
return True
except Exception as e:
show_error(self, f"Failed to save project:\n{str(e)}", "Error")
@@ -741,26 +789,34 @@ def save_project(self) -> bool:
def save_project_as(self) -> bool:
"""Save the project with a new name. Returns False if the user cancels."""
+ from datetime import datetime, timezone
self._ensure_original_view_for_save()
if not self._prompt_and_handle_unapplied_adjustment():
return False
self._sync_adjustment_to_project()
+ suggested = self._last_file_directory
+ if (not self.current_project_file
+ and self.project.image_path
+ and Path(self.project.image_path).stem):
+ suggested = str(Path(self._last_file_directory) / (Path(self.project.image_path).stem + ".orbit"))
file_path, _ = QFileDialog.getSaveFileName(
self,
"Save Project As",
- self._last_file_directory,
+ suggested,
"ORBIT Projects (*.orbit);;JSON Files (*.json);;All Files (*)"
)
if file_path:
self._remember_directory(file_path)
try:
+ start_time = datetime.now(timezone.utc)
self.current_project_file = Path(file_path)
self.project.save(self.current_project_file)
self.undo_stack.setClean()
self.modified = False
self.update_window_title()
self.statusBar().showMessage(f"Project saved: {file_path}")
+ self._record_project_provenance(self.current_project_file, start_time)
return True
except Exception as e:
show_error(self, f"Failed to save project:\n{str(e)}", "Error")
@@ -849,6 +905,9 @@ def export_to_opendrive(self):
if not self._prompt_and_handle_unapplied_adjustment():
return
+ if not self._check_provenance_ready():
+ return
+
# Check if we have any roads
if not self.project.roads:
show_warning(self, "Cannot export: No roads defined in the project.\n"
@@ -856,14 +915,17 @@ def export_to_opendrive(self):
return
# Show export dialog with optional schema path for validation
- adjustment = self.image_view.current_adjustment if hasattr(self.image_view, 'current_adjustment') else None
+ from datetime import datetime, timezone
+ start_time = datetime.now(timezone.utc)
dialog = ExportDialog(
self.project, self,
xodr_schema_path=self.xodr_schema_path,
- adjustment=adjustment,
+ transformer_factory=self._make_transformer_factory(),
)
if dialog.exec() == QDialog.DialogCode.Accepted:
self.statusBar().showMessage("Export completed successfully")
+ if dialog.output_path:
+ self._record_export_provenance(dialog.output_path, "road network OpenDRIVE export", "XODR", start_time)
else:
self.statusBar().showMessage("Export cancelled")
@@ -876,6 +938,9 @@ def export_to_osm(self):
if not self._prompt_and_handle_unapplied_adjustment():
return
+ if not self._check_provenance_ready():
+ return
+
# Check if any element has geo coordinates
has_geo = any(
project_polyline.has_geo_coords()
@@ -905,6 +970,8 @@ def export_to_osm(self):
self._remember_directory(file_path)
try:
+ from datetime import datetime, timezone
+ start_time = datetime.now(timezone.utc)
# Create transformer for pixel→geo conversion (needed for connecting
# roads that only have pixel coordinates, e.g. roundabout entries/exits)
transformer = self._create_transformer(use_validation=True)
@@ -916,6 +983,7 @@ def export_to_osm(self):
if success:
show_info(self, message, "OSM Export")
self.statusBar().showMessage("OSM export completed")
+ self._record_export_provenance(_Path(file_path), "road network OSM export", "OSM", start_time)
else:
show_warning(self, message, "OSM Export")
except Exception as e:
@@ -953,6 +1021,9 @@ def export_georeferencing(self):
"""Export georeferencing parameters to JSON file."""
from orbit.export import export_georeferencing
+ if not self._check_provenance_ready():
+ return
+
# Check if we have enough control points
if len(self.project.control_points) < 3:
show_warning(
@@ -979,6 +1050,8 @@ def export_georeferencing(self):
# Resolve any unapplied adjustment before exporting — downstream tools do not
# support the adjustment field, so the exported matrices must be fully committed.
+ # (For drone-assisted mode _has_unapplied_adjustment always returns False;
+ # the adjustment is stored in project.transform_adjustment instead.)
if not self._prompt_and_handle_unapplied_adjustment():
return
@@ -999,10 +1072,9 @@ def export_georeferencing(self):
else:
proj_string = base_transformer.get_utm_projection_string()
- from orbit.utils.coordinate_transform import create_transformer as _create_transformer
- transformer = _create_transformer(
- self.project.control_points,
- self.project.transform_method,
+ # Use self._create_transformer so drone_metadata and image dimensions
+ # are included (the module-level create_transformer lacks those kwargs).
+ transformer = self._create_transformer(
use_validation=True,
export_proj_string=proj_string,
)
@@ -1011,6 +1083,9 @@ def export_georeferencing(self):
"Please check your control points.", "Transformation Error")
return
+ # Apply any stored adjustment so the exported matrices are fully baked.
+ self._apply_active_adjustment(transformer)
+
# Get image size
if self.image_view.image_item:
image_size = (
@@ -1044,8 +1119,11 @@ def export_georeferencing(self):
self._remember_directory(file_path)
# Export
+ from datetime import datetime, timezone
+ start_time = datetime.now(timezone.utc)
if export_georeferencing(self.project, Path(file_path), transformer, image_size, self.current_project_file):
self.statusBar().showMessage(f"Georeferencing exported to {file_path}")
+ self._record_export_provenance(Path(file_path), "georeferencing parameter export", "JSON", start_time)
else:
show_error(self, "Failed to export georeferencing parameters.", "Export Error")
@@ -1219,9 +1297,13 @@ def import_osm_data(self):
# Check if custom radius was requested (georef mode only)
custom_radius = dialog.get_custom_radius()
if custom_radius is not None:
- center_lon, center_lat = transformer.pixel_to_geo(
- image_width / 2.0, image_height / 2.0
- )
+ # Use the geographic centroid of the control points as the center.
+ # Using transformer.pixel_to_geo(image_width/2, image_height/2) is
+ # unreliable when control points only cover a small portion of the
+ # image — the transformer extrapolates badly far from its training data.
+ all_cps = transformer.all_control_points
+ center_lon = sum(cp.longitude for cp in all_cps) / len(all_cps)
+ center_lat = sum(cp.latitude for cp in all_cps) / len(all_cps)
bbox = calculate_bbox_from_center(center_lat, center_lon, custom_radius)
# Build ImportOptions
@@ -1278,6 +1360,7 @@ def _setup_osm_import(self, OSMImportDialog, calculate_bbox_from_image,
show_error(self, "Failed to create coordinate transformer.\n"
"Please check your control points.", "Transformation Error")
return None
+ self._apply_active_adjustment(transformer)
try:
bbox = calculate_bbox_from_image(image_width, image_height, transformer)
except Exception as e:
@@ -1348,6 +1431,15 @@ def _process_osm_import_result(self, result, source_type, file_path):
show_info(self, msg, "Import Successful")
self.project.openstreetmap_used = True
self.modified = True
+ # Record import source for provenance tracking
+ from datetime import datetime, timezone
+ src_entry = {
+ "type": "osm_file" if source_type == "file" else "osm_api",
+ "path": str(file_path) if file_path else "https://overpass-api.de/api/interpreter",
+ "timestamp": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z"),
+ }
+ if src_entry not in self.project.source_files:
+ self.project.source_files.append(src_entry)
self.image_view.load_project(self.project)
self.elements_tree.refresh_tree()
self.road_tree.refresh_tree()
@@ -1407,6 +1499,7 @@ def import_opendrive_file(self):
scale = dialog.get_scale()
auto_georeference = dialog.get_auto_georeference()
verbose = dialog.get_verbose()
+ import_signals, import_parking, import_object_types = dialog.get_import_filter()
# Override transformer if forcing synthetic mode
if force_synthetic:
@@ -1447,7 +1540,10 @@ def import_opendrive_file(self):
import_mode=import_mode,
scale_pixels_per_meter=scale,
auto_create_control_points=auto_georeference,
- verbose=verbose
+ verbose=verbose,
+ import_signals=import_signals,
+ import_parking=import_parking,
+ import_object_types=import_object_types,
)
# Show progress dialog
@@ -1479,6 +1575,16 @@ def import_opendrive_file(self):
show_opendrive_import_report(result, self)
if result.success:
+ # Record import source for provenance tracking
+ from datetime import datetime, timezone
+ src_entry = {
+ "type": "xodr",
+ "path": str(file_path),
+ "timestamp": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z"),
+ }
+ if src_entry not in self.project.source_files:
+ self.project.source_files.append(src_entry)
+
# Align connecting road paths to lane centers before rendering
scale_factors = self.get_current_scale()
self._align_all_junction_connecting_roads(scale_factors)
@@ -1855,6 +1961,12 @@ def open_georeferencing(self):
# Connect control points changed signal for real-time visualization updates
dialog.control_points_changed.connect(self.on_control_points_changed)
+ # Connect drone metadata changed signal
+ dialog.drone_metadata_changed.connect(self.on_control_points_changed)
+
+ # Connect control point drag signal for live matrix updates without full refresh
+ self.image_view.control_point_moved.connect(self.on_control_point_dragged)
+
# Connect dialog finished signal
dialog.finished.connect(lambda result: self.on_georef_dialog_closed(result))
@@ -1881,8 +1993,23 @@ def on_georef_dialog_closed(self, result):
# Update lane graphics with new scale
self.update_affected_road_lanes()
- # Clean up reference
+ # Clean up reference and disconnect drag signal
self.georef_dialog = None
+ try:
+ self.image_view.control_point_moved.disconnect(self.on_control_point_dragged)
+ except RuntimeError:
+ pass # Already disconnected
+
+ def on_control_point_dragged(self, control_point):
+ """Handle a control point being dragged on the image canvas.
+
+ Updates the transformer and scale display without recreating graphics items.
+ """
+ self._invalidate_cached_transformer()
+ self.update_scale_display()
+ # Keep georef dialog validation display up to date if open
+ if self.georef_dialog and hasattr(self.georef_dialog, 'update_validation'):
+ self.georef_dialog.update_validation()
def on_control_points_changed(self):
"""Handle control points being added/removed in georeferencing dialog."""
@@ -1963,6 +2090,7 @@ def _create_transformer(self, **kwargs):
self.project.transform_method,
image_width=image_width,
image_height=image_height,
+ drone_metadata=self.project.drone_metadata,
**kwargs,
)
@@ -1970,28 +2098,104 @@ def _invalidate_cached_transformer(self):
"""Invalidate the cached transformer while preserving the active adjustment."""
self._cached_transformer = None
+ def _make_transformer_factory(self):
+ """Return a factory callable for creating correctly configured export transformers.
+
+ The returned callable accepts the same kwargs as ``create_transformer``
+ (e.g. ``use_validation``, ``export_proj_string``) and automatically
+ includes drone metadata, image dimensions, and any active adjustment.
+ """
+ def factory(**kwargs):
+ t = self._create_transformer(**kwargs)
+ if t:
+ self._apply_active_adjustment(t)
+ return t
+ return factory
+
+ def _compose_with_drone_base(
+ self, new_adj: 'TransformAdjustment'
+ ) -> 'TransformAdjustment':
+ """Compose new_adj on top of any existing stored drone adjustment.
+
+ For drone-assisted transformers the stored project adjustment is the
+ accumulated base; a new UI delta must be composed on top of it so that
+ successive adjustments build on each other rather than starting fresh.
+ Returns new_adj unchanged for non-drone or when no base is stored.
+ """
+ if (self.project.transform_method != 'drone_assisted'
+ or not self.project.transform_adjustment):
+ return new_adj
+ from orbit.utils.adjustment_fitter import decompose_to_adjustment
+ base = TransformAdjustment.from_dict(self.project.transform_adjustment)
+ M = new_adj.get_adjustment_matrix() @ base.get_adjustment_matrix()
+ return decompose_to_adjustment(M, new_adj.pivot_x, new_adj.pivot_y)
+
def _apply_active_adjustment(self, transformer):
- """Apply the project's persisted adjustment to a transformer, if any."""
+ """Apply the project's persisted adjustment to a transformer, if any.
+
+ For drone-assisted transformers the 'applied' adjustment is stored in
+ project.transform_adjustment rather than current_adjustment (which is
+ kept at identity after baking). Both sources are checked.
+ """
if transformer is None:
return
adj = self.image_view.current_adjustment
if adj and not adj.is_identity():
- transformer.set_adjustment(adj)
+ # For drone-assisted, compose the live UI delta on top of the stored
+ # base so that the transformer sees the total (accumulated) adjustment.
+ transformer.set_adjustment(self._compose_with_drone_base(adj))
+ return
+ # Drone-assisted: fall back to permanently stored project adjustment
+ if (self.project.transform_method == 'drone_assisted'
+ and self.project.transform_adjustment):
+ stored = TransformAdjustment.from_dict(self.project.transform_adjustment)
+ if not stored.is_identity():
+ transformer.set_adjustment(stored)
def _sync_adjustment_to_project(self):
- """Clear any stored adjustment from the project (adjustments are resolved before save)."""
+ """Clear or preserve adjustment for saving.
+
+ For homography/affine: clear (already baked into CP positions).
+ For drone-assisted: keep project.transform_adjustment as-is; it IS
+ the permanent correction for the physics-based transformer.
+ """
+ if self.project.transform_method == 'drone_assisted':
+ return
self.project.transform_adjustment = None
def _has_unapplied_adjustment(self) -> bool:
- """Return True if there is an active non-identity adjustment that has not been baked."""
+ """Return True if there is an active non-identity adjustment that has not been baked.
+
+ For drone-assisted transformers the adjustment cannot be baked into CP
+ positions; it is instead stored in project.transform_adjustment. When
+ using drone-assisted mode the adjustment is always considered 'applied'
+ (either it's stored in the project or hasn't been computed yet) so we
+ never block saves with an "unapplied adjustment" prompt.
+ """
+ if self.project.transform_method == 'drone_assisted':
+ return False
adj = self.image_view.current_adjustment
return adj is not None and not adj.is_identity()
def _bake_adjustment_into_control_points(self):
- """Bake the current adjustment into CP pixel positions and clear it (no dialog)."""
+ """Bake the current adjustment into CP pixel positions and clear it (no dialog).
+
+ For drone-assisted transformers the adjustment is stored in the project
+ instead of CP pixels (see apply_adjustment_to_control_points).
+ """
adj = self.image_view.current_adjustment
if adj is None or adj.is_identity():
return
+ if self.project.transform_method == 'drone_assisted':
+ # Can't bake into CPs — persist in project and re-apply to transformer
+ self.project.transform_adjustment = adj.to_dict()
+ self.image_view.reset_adjustment()
+ self._remove_adjustment_ghost()
+ self._invalidate_cached_transformer()
+ self._cached_transformer = self._create_transformer(use_validation=True)
+ self._apply_active_adjustment(self._cached_transformer)
+ self.refresh_imported_geometry()
+ return
for cp in self.project.control_points:
cp.pixel_x, cp.pixel_y = adj.apply_to_point(cp.pixel_x, cp.pixel_y)
self.image_view.reset_adjustment()
@@ -2601,8 +2805,13 @@ def apply_adjustment_to_control_points(self):
"""
Apply current adjustment to control points.
- This "bakes" the adjustment into the control point positions,
- then recomputes the transformation with the new positions.
+ For homography/affine transformers this "bakes" the adjustment into
+ the control point pixel positions and recomputes the transformation.
+
+ For drone-assisted transformers the core physics matrix is independent
+ of CP pixel positions, so baking into CPs has no effect. Instead the
+ adjustment is stored permanently in the project and kept active on the
+ transformer so it persists across saves/reloads.
"""
adjustment = self.image_view.get_adjustment()
if adjustment is None or adjustment.is_identity():
@@ -2613,7 +2822,45 @@ def apply_adjustment_to_control_points(self):
self.statusBar().showMessage("No control points to adjust")
return
- # Confirm with user
+ is_drone = self.project.transform_method == 'drone_assisted'
+
+ if is_drone:
+ # For drone-assisted: can't bake into CPs because the physics matrix
+ # ignores CP pixel positions. Persist the adjustment in the project so
+ # it survives save/reload, and keep it applied to the transformer.
+ if not ask_yes_no(
+ self,
+ "The drone-assisted transformer is physics-based and cannot absorb "
+ "the adjustment via control point positions.\n\n"
+ "The adjustment will be stored permanently with the project and "
+ "re-applied automatically on every reload.\n\n"
+ "Continue?",
+ "Apply Adjustment"
+ ):
+ return
+
+ self.project.transform_adjustment = self._compose_with_drone_base(
+ adjustment).to_dict()
+ # Reset current_adjustment so the panel and save-prompt see no pending
+ # adjustment (the stored project value is the authoritative source now).
+ self.image_view.reset_adjustment()
+ self._remove_adjustment_ghost()
+
+ # Rebuild transformer; _apply_active_adjustment will re-apply the
+ # stored project adjustment via the drone-assisted fallback path.
+ self._invalidate_cached_transformer()
+ self._cached_transformer = self._create_transformer(use_validation=True)
+ self._apply_active_adjustment(self._cached_transformer)
+
+ self.refresh_imported_geometry()
+ self.modified = True
+ self.update_window_title()
+ self.statusBar().showMessage(
+ "Adjustment stored permanently for drone-assisted transformer"
+ )
+ return
+
+ # --- homography / affine: bake adjustment into CP pixel positions ---
if not ask_yes_no(
self,
"This will modify the pixel positions of all control points "
@@ -2790,8 +3037,33 @@ def _switch_to_aerial(self):
show_warning(self, "Cannot create coordinate transformer.")
self.toggle_aerial_action.setChecked(False)
return
+ # Save the current_adjustment so we can restore it precisely on return.
+ # Without this, current_adjustment keeps the aerial value and causes
+ # _apply_active_adjustment to corrupt the original transformer on the
+ # next aerial switch.
+ self._original_view_adjustment = self.image_view.current_adjustment
self._apply_active_adjustment(self._original_transformer)
self._original_image_np = self.image_view.image_np.copy() if self.image_view.image_np is not None else None
+ # Save exact user-placed pixel positions so the round-trip can restore them
+ # precisely (geo_to_pixel on the least-squares transformer does not reproduce
+ # training point positions exactly when there are more than the minimum points).
+ self._original_cp_pixels = [(cp.pixel_x, cp.pixel_y) for cp in self.project.control_points]
+
+ # Save connecting road pixel paths so geo_to_pixel round-trip errors can't corrupt them.
+ # geo_to_pixel may return out-of-bounds coords for physics-based transformers when a
+ # road is outside the camera's field of view.
+ self._original_cr_paths = {}
+ self._original_junction_centers = {}
+ for junction in self.project.junctions:
+ if junction.center_point:
+ self._original_junction_centers[junction.id] = {
+ 'center': junction.center_point,
+ 'roundabout': junction.roundabout_center,
+ }
+ for cr_id in junction.connecting_road_ids:
+ cr = self.project.get_road(cr_id)
+ if cr and cr.inline_path:
+ self._original_cr_paths[cr_id] = list(cr.inline_path)
# Remove ghost overlay before switching (will be rebuilt on return)
self._remove_adjustment_ghost()
@@ -2818,6 +3090,10 @@ def _switch_to_aerial(self):
self.toggle_aerial_action.setChecked(False)
self._original_image_np = None
self._original_transformer = None
+ self._original_cp_pixels = []
+ self._original_cr_paths = {}
+ self._original_view_adjustment = None
+ self._original_junction_centers = {}
return
# Build initial affine transformer from raw tile image bounds
@@ -2831,6 +3107,10 @@ def _switch_to_aerial(self):
self.toggle_aerial_action.setChecked(False)
self._original_image_np = None
self._original_transformer = None
+ self._original_cp_pixels = []
+ self._original_cr_paths = {}
+ self._original_view_adjustment = None
+ self._original_junction_centers = {}
return
# Resize aerial image so its pixels/meter matches the original image.
@@ -2854,8 +3134,18 @@ def _switch_to_aerial(self):
self.toggle_aerial_action.setChecked(False)
self._original_image_np = None
self._original_transformer = None
+ self._original_cp_pixels = []
+ self._original_cr_paths = {}
+ self._original_view_adjustment = None
+ self._original_junction_centers = {}
return
+ # Ensure CR/junction geo coords are consistent with the original
+ # transformer before reprojecting. Stale geo_coords (from a previous
+ # transformer or from the junction analyzer's endpoint snapping) would
+ # cause reproject_project_geometry to place CRs at wrong aerial pixels.
+ self._resync_junction_geo_coords(self._original_transformer)
+
# Re-project all geometry into the aerial pixel space
count = reproject_project_geometry(
self.project, self._original_transformer, self._aerial_transformer,
@@ -2897,6 +3187,14 @@ def _switch_to_original(self):
self.project, self._aerial_transformer, self._original_transformer,
)
+ # Restore exact user-placed CP pixel positions. reproject_project_geometry
+ # computes them via geo_to_pixel, which doesn't reproduce the original positions
+ # exactly when the least-squares transform has non-zero residuals (>min points).
+ if self._original_cp_pixels:
+ for cp, (px, py) in zip(self.project.control_points, self._original_cp_pixels):
+ cp.pixel_x = px
+ cp.pixel_y = py
+
# Restore adjustment and reposition geo-derived entities
if saved_adjustment is not None:
self._original_transformer.set_adjustment(saved_adjustment)
@@ -2904,12 +3202,61 @@ def _switch_to_original(self):
self.image_view.swap_background(self._original_image_np)
self._cached_transformer = self._original_transformer
+ # Restore the original view's adjustment into current_adjustment.
+ # While in aerial view, current_adjustment held the aerial adjustment; leaving
+ # it there would cause _apply_active_adjustment to corrupt the original transformer
+ # on the next aerial switch, and incorrectly trigger update_all_from_geo_coords
+ # with the wrong (aerial) adjustment value.
+ self.image_view.current_adjustment = self._original_view_adjustment
+
# Recompute pixel positions from geo coords using the adjusted transformer
- # so entities land in the correct adjusted positions. Control points are
- # NOT updated here — they keep their unadjusted positions from reprojection.
- adj = self.image_view.current_adjustment
- if adj and not adj.is_identity():
+ # so entities land in the correct adjusted positions.
+ adj = self._original_view_adjustment
+ drone_adj = (
+ self.project.transform_method == 'drone_assisted'
+ and self.project.transform_adjustment
+ and not TransformAdjustment.from_dict(
+ self.project.transform_adjustment).is_identity()
+ )
+ if (adj and not adj.is_identity()) or drone_adj:
self.image_view.update_all_from_geo_coords(self._cached_transformer)
+
+ # Restore connecting road pixel paths AFTER update_all_from_geo_coords, because
+ # that call re-runs geo_to_pixel via the drone transformer which returns large
+ # out-of-bounds coordinates for CRs whose geo positions are outside the camera's
+ # field of view, undoing the reproject_project_geometry result. We restore the
+ # exact pre-aerial pixel positions as the authoritative source for all CRs
+ # (in-FOV and out-of-FOV alike) so that subsequent load_project picks them up.
+ if self._original_cr_paths:
+ for junction in self.project.junctions:
+ for cr_id in junction.connecting_road_ids:
+ if cr_id in self._original_cr_paths:
+ cr = self.project.get_road(cr_id)
+ if cr:
+ cr.inline_path = list(self._original_cr_paths[cr_id])
+
+ # Restore junction center_point pixel positions for the same reason: the drone
+ # camera model's geo_to_pixel gives out-of-bounds values for junctions that are
+ # outside its field of view.
+ if self._original_junction_centers:
+ for junction in self.project.junctions:
+ saved = self._original_junction_centers.get(junction.id)
+ if saved:
+ junction.center_point = saved['center']
+ junction.roundabout_center = saved['roundabout']
+
+ # Regenerate CR paths from current road positions. After restoring
+ # _original_cr_paths the saved positions may no longer match roads that
+ # were moved in aerial view, leaving gaps at junction endpoints.
+ # Parampoly3 CRs need a full spline rebuild; lane-aligned CRs need
+ # their lane-offset endpoints recomputed; non-aligned polyline CRs are
+ # handled by the endpoint snap inside _resync_junction_geo_coords.
+ self._regenerate_all_junction_crs()
+
+ # Resync geo coords to match the restored pixel positions so the
+ # next reproject (or save) sees consistent geo_coords.
+ self._resync_junction_geo_coords(self._original_transformer)
+
self._aerial_view_active = False
# Refresh scene
@@ -2925,6 +3272,10 @@ def _switch_to_original(self):
self._original_image_np = None
self._original_transformer = None
self._aerial_transformer = None
+ self._original_cp_pixels = []
+ self._original_cr_paths = {}
+ self._original_junction_centers = {}
+ self._original_view_adjustment = None
self.toggle_aerial_action.setText("&Aerial Map View")
self.toggle_aerial_action.setEnabled(True)
@@ -2953,6 +3304,14 @@ def _initialize_and_refresh_geo_coords(self):
except Exception:
return
+ # For drone-assisted projects with a saved adjustment, apply it before
+ # syncing geo coords from pixel positions. Saved pixel positions are in
+ # "adjusted" space (the user placed them while the adjustment was active),
+ # so pixel→geo conversion must use the same adjusted transformer that was
+ # active at save time. Without this, _resync_junction_geo_coords produces
+ # wrong geo coords and _restore_adjustment_from_project double-shifts them.
+ self._apply_active_adjustment(transformer)
+
# Initialize geo_path for connecting roads that don't have it (legacy support)
for junction in self.project.junctions:
for cr_id in junction.connecting_road_ids:
@@ -2964,10 +3323,125 @@ def _initialize_and_refresh_geo_coords(self):
# (only for CRs without lane connections; lane-aligned CRs are skipped)
self._snap_connecting_road_endpoints()
+ # Resync CR/junction geo coords from pixel positions to ensure
+ # consistency with the active transformer. Without this, a transformer
+ # change (e.g. switching to drone_assisted) leaves stale geo_coords
+ # that cause reproject_project_geometry to produce wrong pixel positions.
+ self._resync_junction_geo_coords(transformer)
+
def _snap_connecting_road_endpoints(self):
"""Snap CR pixel endpoints to match connected road endpoints."""
self.controller.snap_connecting_road_endpoints()
+ def _resync_junction_geo_coords(self, transformer):
+ """Recompute CR and junction geo coords from pixel positions.
+
+ Connecting road pixel paths are authoritative (generated by curve
+ fitting); their inline_geo_path may become stale when the transformer
+ changes (e.g. switching to drone_assisted). Resyncing ensures
+ geo_to_pixel(geo) == pixel for every CR, which is required for
+ reproject_project_geometry to produce correct results.
+
+ After resyncing, CR endpoints are snapped to the connected polyline
+ endpoints (both pixel and geo) so that no gap appears after reprojection.
+ """
+ if transformer is None:
+ return
+ for junction in self.project.junctions:
+ if junction.center_point:
+ lon, lat = transformer.pixel_to_geo(
+ junction.center_point[0], junction.center_point[1],
+ )
+ junction.geo_center_point = (lon, lat)
+ if junction.roundabout_center:
+ rlon, rlat = transformer.pixel_to_geo(
+ junction.roundabout_center[0],
+ junction.roundabout_center[1],
+ )
+ junction.geo_roundabout_center = (rlon, rlat)
+
+ # Build set of CRs with lane connections (those are lane-aligned)
+ aligned_cr_ids = {
+ c.connecting_road_id
+ for c in (junction.lane_connections or [])
+ if c.connecting_road_id
+ }
+
+ for cr_id in junction.connecting_road_ids:
+ cr = self.project.get_road(cr_id)
+ if not cr or not cr.inline_path:
+ continue
+
+ # Snap pixel endpoints to connected polyline endpoints
+ if cr_id not in aligned_cr_ids:
+ self._snap_cr_endpoints_pixel(cr)
+
+ # Recompute full geo path from (snapped) pixel
+ cr.inline_geo_path = [
+ transformer.pixel_to_geo(x, y)
+ for x, y in cr.inline_path
+ ]
+
+ # Snap geo endpoints to connected polyline geo endpoints
+ # so that reproject produces identical pixel positions for
+ # the CR endpoint and its connecting polyline endpoint.
+ if cr_id not in aligned_cr_ids:
+ self._snap_cr_endpoints_geo(cr)
+
+ def _snap_cr_endpoints_pixel(self, cr):
+ """Snap a CR's pixel endpoints to the connected polyline endpoints."""
+ pred = self.project.get_road(cr.predecessor_id)
+ succ = self.project.get_road(cr.successor_id)
+ pred_pl = self.project.get_polyline(pred.centerline_id) if pred else None
+ succ_pl = self.project.get_polyline(succ.centerline_id) if succ else None
+ if pred_pl and pred_pl.points:
+ cr.inline_path[0] = (
+ pred_pl.points[-1] if cr.predecessor_contact == 'end'
+ else pred_pl.points[0]
+ )
+ if succ_pl and succ_pl.points:
+ cr.inline_path[-1] = (
+ succ_pl.points[-1] if cr.successor_contact == 'end'
+ else succ_pl.points[0]
+ )
+
+ def _snap_cr_endpoints_geo(self, cr):
+ """Snap a CR's geo endpoints to the connected polyline geo endpoints."""
+ if not cr.inline_geo_path:
+ return
+ pred = self.project.get_road(cr.predecessor_id)
+ succ = self.project.get_road(cr.successor_id)
+ pred_pl = self.project.get_polyline(pred.centerline_id) if pred else None
+ succ_pl = self.project.get_polyline(succ.centerline_id) if succ else None
+ if pred_pl and pred_pl.geo_points:
+ cr.inline_geo_path[0] = (
+ pred_pl.geo_points[-1] if cr.predecessor_contact == 'end'
+ else pred_pl.geo_points[0]
+ )
+ if succ_pl and succ_pl.geo_points:
+ cr.inline_geo_path[-1] = (
+ succ_pl.geo_points[-1] if cr.successor_contact == 'end'
+ else succ_pl.geo_points[0]
+ )
+
+ def _regenerate_all_junction_crs(self):
+ """Regenerate all CR paths from current road positions after a view switch.
+
+ Called after _original_cr_paths is restored so that CRs which connect
+ to roads that were moved in aerial view are rebuilt from the new road
+ positions rather than the stale saved positions.
+ """
+ # Rebuild parampoly3 CRs from connected road endpoints
+ for junction in self.project.junctions:
+ for cr_id in junction.connecting_road_ids:
+ cr = self.project.get_road(cr_id)
+ if cr and cr.geometry_type == "parampoly3":
+ self.controller._regenerate_parampoly3_cr(cr)
+
+ # Re-apply lane-alignment offsets for all junctions that have them
+ scale_factors = self.get_current_scale()
+ self.controller.align_all_junction_crs(scale_factors)
+
def update_affected_road_lanes(self):
"""Update lane graphics for all roads with centerlines."""
# Get current scale factors if available
diff --git a/orbit/gui/undo_commands.py b/orbit/gui/undo_commands.py
index 44b3b6c..ffdfbbd 100644
--- a/orbit/gui/undo_commands.py
+++ b/orbit/gui/undo_commands.py
@@ -13,6 +13,7 @@
from orbit.models import Junction, LaneConnection, ParkingSpace, Polyline, Road, RoadObject, Signal
if TYPE_CHECKING:
+ from .image_view import ImageView
from .main_window import MainWindow
@@ -1349,3 +1350,35 @@ def _apply_data(self, data: dict):
scale_factor = self.main_window.get_current_scale()
self.main_window.image_view.add_parking_graphics(parking, scale_factor)
self.main_window._refresh_trees()
+
+
+class SmoothCRCommand(QUndoCommand):
+ """Command for smoothing a connecting road's inline_path."""
+
+ def __init__(
+ self,
+ image_view: 'ImageView',
+ cr_road: 'Road',
+ old_inline_path: list,
+ new_inline_path: list,
+ description: str = "Smooth Connecting Road Curve",
+ ):
+ super().__init__(description)
+ self.image_view = image_view
+ self.cr_road = cr_road
+ self.old_inline_path = [tuple(p) for p in old_inline_path]
+ self.new_inline_path = [tuple(p) for p in new_inline_path]
+ self._first_redo = True
+
+ def redo(self):
+ if self._first_redo:
+ self._first_redo = False
+ return
+ self._apply(self.new_inline_path)
+
+ def undo(self):
+ self._apply(self.old_inline_path)
+
+ def _apply(self, path: list):
+ self.cr_road.inline_path = list(path)
+ self.image_view.update_connecting_road_graphics(self.cr_road.id)
diff --git a/orbit/gui/widgets/elements_tree.py b/orbit/gui/widgets/elements_tree.py
index dee33f6..ad70c29 100644
--- a/orbit/gui/widgets/elements_tree.py
+++ b/orbit/gui/widgets/elements_tree.py
@@ -356,14 +356,14 @@ def create_object_item(self, obj) -> QTreeWidgetItem:
if obj.road_id and self.project:
road = self.project.get_road(obj.road_id)
if road:
- road_name = road.name or f"Road {road.id[:8]}"
+ road_name = f"Road {road.id}" + (f" – {road.name}" if road.name else "")
road_info = f" → {road_name}"
else:
cr = self.project.get_road(obj.road_id)
if cr and cr.is_connecting_road:
road_info = f" → CR {cr.id[:8]}"
- text = f"{display_name} ({category}){road_info}"
+ text = f"#{obj.id} {display_name} ({category}){road_info}"
item = QTreeWidgetItem([text])
item.setData(0, Qt.ItemDataRole.UserRole, {"type": "object", "id": obj.id})
diff --git a/orbit/import/opendrive_importer.py b/orbit/import/opendrive_importer.py
index f2dcab0..d376172 100644
--- a/orbit/import/opendrive_importer.py
+++ b/orbit/import/opendrive_importer.py
@@ -38,6 +38,80 @@
_DEFAULT_LANE_WIDTH = 3.5 # Fallback when ODR has no lane width
+def classify_xodr_object_type(odr_type: str, odr_subtype: str = "") -> Optional[ObjectType]:
+ """Map an OpenDRIVE object type+subtype to an ORBIT ObjectType.
+
+ Module-level so it can be used both by the importer and the file scanner.
+ """
+ t = odr_type.lower()
+ s = odr_subtype.lower()
+
+ if 'lamp' in t or 'pole' in t:
+ return ObjectType.LAMPPOST
+ elif 'guard' in t or 'rail' in t or 'barrier' in t:
+ return ObjectType.GUARDRAIL
+ elif 'building' in t or 'house' in t:
+ return ObjectType.BUILDING
+ elif 'vegetation' in t or 'tree' in t or 'bush' in t or 'shrub' in t or 'forest' in t:
+ if 'forest' in s or 'forest' in t:
+ return ObjectType.LANDUSE_FOREST
+ elif 'meadow' in s:
+ return ObjectType.LANDUSE_MEADOW
+ elif 'scrub' in s:
+ return ObjectType.LANDUSE_SCRUB
+ elif 'conifer' in s or 'pine' in s or 'conifer' in t or 'pine' in t:
+ return ObjectType.TREE_CONIFER
+ elif 'tree' in s or 'tree' in t:
+ return ObjectType.TREE_BROADLEAF
+ elif 'bush' in s or 'shrub' in s or 'bush' in t or 'shrub' in t:
+ return ObjectType.BUSH
+ return ObjectType.TREE_BROADLEAF
+ elif 'land' in t:
+ if 'farmland' in s or 'farm' in s:
+ return ObjectType.LANDUSE_FARMLAND
+ elif 'meadow' in s:
+ return ObjectType.LANDUSE_MEADOW
+ elif 'water' in t:
+ if 'wetland' in s:
+ return ObjectType.NATURAL_WETLAND
+ return ObjectType.NATURAL_WATER
+
+ return None
+
+
+def scan_xodr_feature_categories(file_path: str) -> Dict[str, int]:
+ """Scan an xodr file and return counts of feature categories present.
+
+ Uses lxml iterparse for efficiency — does not build a full parse tree.
+
+ Returns a dict with keys:
+ "signals" — count of elements
+ "parking" — count of