Load CSV File
+ + +Pi Directory Files:
+ + +Or Upload from Browser:
++ to upload path data +
diff --git a/README.md b/README.md index eb7fba381..bc4b6c2b6 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@  -Donkeycar is minimalist and modular self driving library for Python. It is developed for hobbyists and students with a focus on allowing fast experimentation and easy community contributions. It is being actively used at the high school and university level for learning and research. It offers a [rich graphical interface](https://docs.donkeycar.com/utility/ui/) and includes a [simulator](https://docs.donkeycar.com/guide/deep_learning/simulator/) so you can experiment with self-driving even before you build a robot. +Donkeycar is a minimalist and modular self driving library for Python. It is developed for hobbyists and students with a focus on allowing fast experimentation and easy community contributions. It is being actively used at the high school and university level for learning and research. It offers a [rich graphical interface](https://docs.donkeycar.com/utility/ui/) and includes a [simulator](https://docs.donkeycar.com/guide/deep_learning/simulator/) so you can experiment with self-driving even before you build a robot. 
#### Quick Links * [Donkeycar Updates & Examples](http://donkeycar.com) diff --git a/donkeycar/__init__.py b/donkeycar/__init__.py index 00cc20972..c982ca604 100644 --- a/donkeycar/__init__.py +++ b/donkeycar/__init__.py @@ -3,7 +3,7 @@ from pyfiglet import Figlet import logging -__version__ = '5.2.dev2' +__version__ = '5.3.dev1' logging.basicConfig(level=os.environ.get('LOGLEVEL', 'INFO').upper()) diff --git a/donkeycar/parts/camera.py b/donkeycar/parts/camera.py index 1377089c7..228ea0acb 100644 --- a/donkeycar/parts/camera.py +++ b/donkeycar/parts/camera.py @@ -35,7 +35,7 @@ def __init__(self, image_w=160, image_h=120, image_d=3, self.camera = Picamera2() config = self.camera.create_preview_configuration( config_dict, transform=transform) - self.camera.align_configuration(config) + # self.camera.align_configuration(config) # this created issues with the libcamera2 library on the Pi5, which automatically changes the resolution to 128x120 in an attempt to align with native sensor resolution self.camera.configure(config) # try min / max frame rate as 0.1 / 1 ms (it will be slower though) self.camera.set_controls({"FrameDurationLimits": (100, 1000)}) diff --git a/donkeycar/parts/interpreter.py b/donkeycar/parts/interpreter.py index 8af44e748..c37cf7315 100755 --- a/donkeycar/parts/interpreter.py +++ b/donkeycar/parts/interpreter.py @@ -4,14 +4,38 @@ import numpy as np from typing import Union, Sequence, List -import tensorflow as tf -from tensorflow import keras -from tensorflow.python.saved_model import tag_constants, signature_constants -from tensorflow.python.compiler.tensorrt import trt_convert as trt +try: + import tensorflow as tf + from tensorflow import keras + from tensorflow.python.saved_model import tag_constants, signature_constants + from tensorflow.python.compiler.tensorrt import trt_convert as trt +except ImportError: + tf = None + keras = None + tag_constants = None + signature_constants = None + trt = None logger = logging.getLogger(__name__) +def 
get_tflite_interpreter(): + """Get TFLite Interpreter from tflite-runtime or full TensorFlow.""" + try: + from tflite_runtime.interpreter import Interpreter + return Interpreter + except ImportError: + pass + try: + from ai_edge_litert.interpreter import Interpreter + return Interpreter + except ImportError: + pass + if tf is not None: + return tf.lite.Interpreter + raise ImportError("No TFLite runtime found. Install tflite-runtime or tensorflow.") + + def has_trt_support(): try: converter = trt.TrtGraphConverterV2() @@ -91,14 +115,14 @@ def set_model(self, pilot: 'KerasPilot') -> None: """ Some interpreters will need the model""" pass - def set_optimizer(self, optimizer: tf.keras.optimizers.Optimizer) -> None: + def set_optimizer(self, optimizer) -> None: pass def compile(self, **kwargs): raise NotImplementedError('Requires implementation') @abstractmethod - def get_input_shape(self, input_name) -> tf.TensorShape: + def get_input_shape(self, input_name): pass def predict(self, img_arr: np.ndarray, *other_arr: np.ndarray) \ @@ -127,7 +151,7 @@ class KerasInterpreter(Interpreter): def __init__(self): super().__init__() - self.model: tf.keras.Model = None + self.model = None def set_model(self, pilot: 'KerasPilot') -> None: self.model = pilot.create_model() @@ -146,10 +170,10 @@ def set_model(self, pilot: 'KerasPilot') -> None: self.shapes = (dict(zip(self.input_keys, input_shape)), dict(zip(self.output_keys, output_shape))) - def set_optimizer(self, optimizer: tf.keras.optimizers.Optimizer) -> None: + def set_optimizer(self, optimizer) -> None: self.model.optimizer = optimizer - def get_input_shape(self, input_name) -> tf.TensorShape: + def get_input_shape(self, input_name): assert self.model, 'Model not set' return self.shapes[0][input_name] @@ -174,6 +198,18 @@ def predict_from_dict(self, input_dict): def load(self, model_path: str) -> None: logger.info(f'Loading model {model_path}') self.model = keras.models.load_model(model_path, compile=False) + # Set 
input_keys and output_keys after loading (same as set_model) + input_shape = self.model.input_shape + if type(input_shape) is not list: + input_shape = [input_shape] + output_shape = self.model.output_shape + if type(output_shape) is not list: + output_shape = [output_shape] + + self.input_keys = self.model.input_names + self.output_keys = self.model.output_names + self.shapes = (dict(zip(self.input_keys, input_shape)), + dict(zip(self.output_keys, output_shape))) def load_weights(self, model_path: str, by_name: bool = True) -> \ None: @@ -263,7 +299,8 @@ def load(self, model_path): 'TFlitePilot should load only .tflite files' logger.info(f'Loading model {model_path}') # Load TFLite model and extract input and output keys - self.interpreter = tf.lite.Interpreter(model_path=model_path) + Interpreter = get_tflite_interpreter() + self.interpreter = Interpreter(model_path=model_path) self.signatures = self.interpreter.get_signature_list() self.runner = self.interpreter.get_signature_runner() self.input_keys = self.signatures['serving_default']['inputs'] @@ -312,7 +349,7 @@ def set_model(self, pilot: 'KerasPilot') -> None: # state as the trt model hasn't been loaded yet self.pilot = pilot - def get_input_shape(self, input_name) -> tf.TensorShape: + def get_input_shape(self, input_name): assert self.graph_func, "Requires loadin the tensorrt model first" return self.graph_func.structured_input_signature[1][input_name].shape diff --git a/donkeycar/parts/keras.py b/donkeycar/parts/keras.py index 9c89f81a5..1df64932e 100644 --- a/donkeycar/parts/keras.py +++ b/donkeycar/parts/keras.py @@ -15,23 +15,27 @@ from typing import Dict, Tuple, Optional, Union, List, Sequence, Callable, Any from logging import getLogger -from tensorflow.python.data.ops.dataset_ops import DatasetV1, DatasetV2 - import donkeycar as dk from donkeycar.utils import normalize_image, linear_bin from donkeycar.pipeline.types import TubRecord from donkeycar.parts.interpreter import Interpreter, 
KerasInterpreter -import tensorflow as tf -from tensorflow import keras -from tensorflow.keras.layers import (Dense, Input,Convolution2D, - MaxPooling2D, Activation, Dropout, Flatten, LSTM, BatchNormalization, - Conv3D, MaxPooling3D, Conv2DTranspose) - -from tensorflow.keras.layers import TimeDistributed as TD -from tensorflow.keras.backend import concatenate -from tensorflow.keras.models import Model -from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint +try: + import tensorflow as tf + from tensorflow import keras + from tensorflow.python.data.ops.dataset_ops import DatasetV1, DatasetV2 + from tensorflow.keras.layers import (Dense, Input, Convolution2D, + MaxPooling2D, Activation, Dropout, Flatten, LSTM, BatchNormalization, + Conv3D, MaxPooling3D, Conv2DTranspose) + from tensorflow.keras.layers import TimeDistributed as TD + from tensorflow.keras.backend import concatenate + from tensorflow.keras.models import Model + from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint +except ImportError: + tf = None + keras = None + DatasetV1 = None + DatasetV2 = None ONE_BYTE_SCALE = 1.0 / 255.0 @@ -86,7 +90,7 @@ def set_optimizer(self, optimizer_type: str, raise Exception(f"Unknown optimizer type: {optimizer_type}") self.interpreter.set_optimizer(optimizer) - def get_input_shape(self, input_name) -> tf.TensorShape: + def get_input_shape(self, input_name): return self.interpreter.get_input_shape(input_name) def seq_size(self) -> int: @@ -110,9 +114,8 @@ def run(self, img_arr: np.ndarray, *other_arr: List[float]) \ # self.output_shape() first dictionary keys, because that's how we # set up the model values = (norm_img_arr, ) + np_other_array - # note output_shapes() returns a 2-tuple of dicts for input shapes - # and output shapes(), so we need the first tuple here - input_dict = dict(zip(self.output_shapes()[0].keys(), values)) + # use interpreter's input_keys directly (works with TFLite and Keras) + input_dict = 
dict(zip(self.interpreter.input_keys, values)) return self.inference_from_dict(input_dict) def inference_from_dict(self, input_dict: Dict[str, np.ndarray]) \ @@ -146,7 +149,7 @@ def train(self, verbose: int = 1, min_delta: float = .0005, patience: int = 5, - show_plot: bool = False) -> tf.keras.callbacks.History: + show_plot: bool = False): """ trains the model """ @@ -242,7 +245,7 @@ def output_types(self) -> Tuple[Dict[str, np.typename], ...]: types = tuple({k: tf.float64 for k in d} for d in shapes) return types - def output_shapes(self) -> Dict[str, tf.TensorShape]: + def output_shapes(self): return {} def __str__(self) -> str: diff --git a/donkeycar/parts/oak_d.py b/donkeycar/parts/oak_d.py new file mode 100644 index 000000000..c290e5b3f --- /dev/null +++ b/donkeycar/parts/oak_d.py @@ -0,0 +1,381 @@ +""" +Author: Brian Henry & Manav Gagvani +File: oak_d.py +Date: February 13 2022, revised July 10, 2025 +Notes: + Based on realsense435i.py by Ed Murphy: https://github.com/autorope/donkeycar/blob/454be3068ea5dfbac226c3be4d84b0a61d1cec84/donkeycar/parts/realsense435i.py + Based on https://github.com/luxonis/depthai-tutorials/blob/d571473911f876b0d4ac52b7ffdc0fb2beae1641/1-hello-world/hello_world.py + + https://docs.luxonis.com/en/latest/pages/tutorials/first_steps/#first-steps-with-depthai + > If you are using a Linux system, in most cases you have to add a new udev rule for our script to be able to access the device correctly. 
You can add and apply new rules by running + $ echo 'SUBSYSTEM=="usb", ATTRS{idVendor}=="03e7", MODE="0666"' | sudo tee /etc/udev/rules.d/80-movidius.rules + $ sudo udevadm control --reload-rules && sudo udevadm trigger + (or: "RuntimeError: No DepthAI (Oak-D-Lite) device (camera) found!") + + `sudo pip3 install --extra-index-url https://developer.download.nvidia.com/compute/redist/jp/v461 tensorflow` +""" + +import argparse +import string +import time +import sys + +import numpy as np # numpy - manipulate the packet data returned by depthai +import cv2 as cv2 # opencv - display the video stream +import depthai # depthai - access the camera and its data packets +from depthai import Pipeline, DataOutputQueue, ImgFrame, ImgDetections, ImgDetection +from numpy import ndarray +from typing import List + +WIDTH = 640 +HEIGHT = 480 + + +class OakD(object): + """ + Donkeycar part for the Oak-D camera + Intel Movidius based depth sensing camera + https://docs.luxonis.com/projects/hardware/en/latest/pages/DM9095.html + https://www.kickstarter.com/projects/opencv/opencv-ai-kit-oak-depth-camera-4k-cv-edge-object-detection + https://shop.luxonis.com/ + """ + + def __init__( + self, + width=WIDTH, + height=HEIGHT, + enable_rgb=True, + enable_depth=True, + device_id=None, + ): + self.device_id = device_id # "18443010C1E4681200" # serial number of device to use|None to use default|"list" to list devices and exit + self.enable_rgb = enable_rgb + self.enable_depth = enable_depth + + self.width = width + self.height = height + + # TODO: Accommodate using device native resolutions to avoid resizing. + self.resize = (width != WIDTH) or (height != HEIGHT) + if self.resize: + print( + f"The output images will be resized from {(WIDTH, HEIGHT)} to {(self.width, self.height)} using OpenCV. Device resolution in use is 640x480." 
+ ) + + self.pipeline = None + if self.enable_depth or self.enable_rgb: + self.pipeline = depthai.Pipeline() + + device_info = self.get_depthai_device_info(device_id) + + if self.enable_depth: + self.setup_depth_camera(WIDTH, HEIGHT) + + if self.enable_rgb: + self.setup_rgb_camera(WIDTH, HEIGHT) + + self.oak_d_device = depthai.Device(self.pipeline, device_info) + + # initialize frame state + self.color_image = None + self.depth_image = None + self.frame_count = 0 + self.start_time = time.time() + self.frame_time = self.start_time + + self.running = True + + # Taken from the demo application. + def get_depthai_device_info(self, device_id: string): + device_infos = depthai.Device.getAllAvailableDevices() + if len(device_infos) == 0: + raise RuntimeError("No DepthAI (Oak-D-Lite) device (camera) found!") + else: + print("Available devices:") + for i, deviceInfo in enumerate(device_infos): + print(f"[{i}] {deviceInfo.getMxId()} [{deviceInfo.state.name}]") + + # Set the deviceId to "list" in order to list the connected devices' ids. + if device_id == "list": + raise SystemExit(0) + elif device_id is not None: + matching_device = next( + filter(lambda info: info.getMxId() == device_id, device_infos), None + ) + if matching_device is None: + raise RuntimeError( + f"No DepthAI device found with id matching {device_id} !" 
+ ) + return matching_device + elif len(device_infos) == 1: + return device_infos[0] + else: + val = input("Which DepthAI Device you want to use: ") + try: + return device_infos[int(val)] + except: + raise ValueError(f"Incorrect value supplied: {val}") + + def setup_depth_camera(self, width, height): + # Set up left and right cameras + mono_left = self.get_mono_camera(self.pipeline, True) + mono_right = self.get_mono_camera(self.pipeline, False) + + # Combine left and right cameras to form a stereo pair + stereo: depthai.node.StereoDepth = self.get_stereo_pair( + self.pipeline, mono_left, mono_right + ) + + # Define and name output depth map + xout_depth = self.pipeline.createXLinkOut() + xout_depth.setStreamName("depth") + + stereo.depth.link(xout_depth.input) + + def setup_rgb_camera(self, width, height): + cam_rgb = self.pipeline.create(depthai.node.ColorCamera) + + res = depthai.ColorCameraProperties.SensorResolution.THE_1080_P + + cam_rgb.setResolution(res) + cam_rgb.setVideoSize(width, height) + + xout_rgb = self.pipeline.create(depthai.node.XLinkOut) + xout_rgb.setStreamName("rgb") + + cam_rgb.video.link(xout_rgb.input) + + def get_mono_camera(self, pipeline: Pipeline, is_left: bool): + # Configure mono camera + mono = pipeline.createMonoCamera() + + # Set camera resolution + mono.setResolution(depthai.MonoCameraProperties.SensorResolution.THE_480_P) + + if is_left: + # Get left camera + mono.setBoardSocket(depthai.CameraBoardSocket.LEFT) + else: + # Get right camera + mono.setBoardSocket(depthai.CameraBoardSocket.RIGHT) + + return mono + + def get_stereo_pair(self, pipeline: Pipeline, mono_left, mono_right): + # Configure the stereo pair for depth estimation + new_stereo = pipeline.createStereoDepth() + # Checks occluded pixels and marks them as invalid + new_stereo.setLeftRightCheck(True) + + # Configure left and right cameras to work as a stereo pair + mono_left.out.link(new_stereo.left) + mono_right.out.link(new_stereo.right) + + return new_stereo + + 
def get_frame(self, queue: DataOutputQueue): + # Get frame from queue + new_frame: ImgFrame = queue.get() + # Convert to OpenCV format + return new_frame.getCvFrame() + + def _poll(self): + last_time = self.frame_time + self.frame_time = time.time() - self.start_time + self.frame_count += 1 + + # + # convert camera frames to images + # + if self.enable_rgb or self.enable_depth: + + self.depth_queue: DataOutputQueue = self.oak_d_device.getOutputQueue( + name="depth", maxSize=1, blocking=False + ) + self.rgb_queue: DataOutputQueue = self.oak_d_device.getOutputQueue( + "rgb", maxSize=1, blocking=False + ) + + depth_frame = self.get_frame(self.depth_queue) + rgb_frame = self.get_frame(self.rgb_queue) + + self.depth_image = depth_frame + self.color_image = rgb_frame + + if self.resize: + if self.width != WIDTH or self.height != HEIGHT: + import cv2 + + self.color_image = ( + cv2.resize( + self.color_image, (self.width, self.height), cv2.INTER_NEAREST + ) + if self.enable_rgb + else None + ) + self.depth_image = ( + cv2.resize( + self.depth_image, (self.width, self.height), cv2.INTER_NEAREST + ) + if self.enable_depth + else None + ) + + def update(self): + """ + When running threaded, update() is called from the background thread + to update the state. run_threaded() is called to return the latest state. + """ + while self.running: + self._poll() + + def run_threaded(self): + """ + Return the latest state read by update(). This will not block. + Both images are returned, but each may be None if the feature is not enabled when the camera part is constructed. + :return: (rgb_image: nparray, depth_image: nparray) + """ + return self.color_image, self.depth_image + + def run(self): + """ + Read and return frame from camera. This will block while reading the frame. + see run_threaded() for return types. 
+ """ + self._poll() + return self.run_threaded() + + def shutdown(self): + self.running = False + time.sleep(2) # give thread enough time to shutdown + + # done running + self.oak_d_device.close() + + +# +# self test +# +if __name__ == "__main__": + + parser = argparse.ArgumentParser() + + parser.add_argument( + "--rgb", default=False, action="store_true", help="Stream RGB camera" + ) + parser.add_argument( + "--depth", default=False, action="store_true", help="Stream depth camera" + ) + parser.add_argument( + "--device_id", + help='Camera id (if more than one camera connected), or "list" to print the connected device ids', + ) + args = parser.parse_args() + + if not (args.rgb or args.depth): + print("Must specify one or more of --rgb, --depth") + parser.print_help() + sys.exit(0) + + show_opencv_window = ( + args.rgb or args.depth + ) # True to show images in opencv window: note that default donkeycar environment is not configured for this. + if show_opencv_window: + import cv2 + + enable_rgb = args.rgb + enable_depth = args.depth + + devices = depthai.Device.getAllAvailableDevices() + + device_id = args.device_id # getMxId + + width = 640 + height = 480 + channels = 3 + + profile_frames = 0 # set to non-zero to calculate the max frame rate using given number of frames + + camera = None + try: + camera = OakDLite( + width=width, + height=height, + enable_rgb=enable_rgb, + enable_depth=enable_depth, + device_id=device_id, + ) + + frame_count = 0 + start_time = time.time() + frame_time = start_time + while True: + # + # read data from camera + # + color_image, depth_image = camera.run() + + # maintain frame timing + frame_count += 1 + last_time = frame_time + frame_time = time.time() + + # Show images + if show_opencv_window and not profile_frames: + cv2.namedWindow("Oak-D", cv2.WINDOW_AUTOSIZE) + if enable_rgb or enable_depth: + # make sure depth and color images have same number of channels so we can show them together in the window + if 3 == channels: + 
depth_colormap = ( + cv2.applyColorMap( + cv2.convertScaleAbs(depth_image, alpha=0.03), + cv2.COLORMAP_JET, + ) + if enable_depth + else None + ) + else: + depth_colormap = ( + cv2.cvtColor( + cv2.applyColorMap( + cv2.convertScaleAbs(depth_image, alpha=0.03), + cv2.COLORMAP_JET, + ), + cv2.COLOR_RGB2GRAY, + ) + if enable_depth + else None + ) + + # Stack both images horizontally (i.e. side by side). + images = None + if enable_rgb: + images = ( + np.hstack((color_image, depth_colormap)) + if enable_depth + else color_image + ) + elif enable_depth: + images = depth_colormap + + if images is not None: + cv2.imshow("Oak-D", images) + + # Press esc or 'q' to close the image window + key = cv2.waitKey(1) + if key & 0xFF == ord("q") or key == 27: + cv2.destroyAllWindows() + break + if profile_frames > 0: + if frame_count == profile_frames: + print( + f"Acquired {frame_count} frames in {frame_time - start_time} seconds for {frame_count / (frame_time - start_time)} fps" + ) + + break + else: + time.sleep(0.05) + finally: + if camera is not None: + camera.shutdown() diff --git a/donkeycar/parts/oled.py b/donkeycar/parts/oled.py index 2ce137a46..f8cd768cf 100644 --- a/donkeycar/parts/oled.py +++ b/donkeycar/parts/oled.py @@ -1,161 +1,200 @@ -# requires the Adafruit ssd1306 library: pip install adafruit-circuitpython-ssd1306 - -import subprocess -import time -from board import SCL, SDA -import busio -from PIL import Image, ImageDraw, ImageFont -import adafruit_ssd1306 - - -class OLEDDisplay(object): - ''' - Manages drawing of text on the OLED display. - ''' - def __init__(self, rotation=0, resolution=1): - # Placeholder - self._EMPTY = '' - # Total number of lines of text - self._SLOT_COUNT = 4 - self.slots = [self._EMPTY] * self._SLOT_COUNT - self.display = None - self.rotation = rotation - if resolution == 2: - self.height = 64 - else: - self.height = 32 - - def init_display(self): - ''' - Initializes the OLED display. 
- ''' - if self.display is None: - # Create the I2C interface. - i2c = busio.I2C(SCL, SDA) - # Create the SSD1306 OLED class. - # The first two parameters are the pixel width and pixel height. Change these - # to the right size for your display! - self.display = adafruit_ssd1306.SSD1306_I2C(128, self.height, i2c) - self.display.rotation = self.rotation - - - self.display.fill(0) - self.display.show() - - # Create blank image for drawing. - # Make sure to create image with mode '1' for 1-bit color. - self.width = self.display.width - self.image = Image.new("1", (self.width, self.height)) - - # Get drawing object to draw on image. - self.draw = ImageDraw.Draw(self.image) - - # Draw a black filled box to clear the image. - self.draw.rectangle((0, 0, self.width, self.height), outline=0, fill=0) - # Load Fonts - self.font = ImageFont.load_default() - self.clear_display() - - def clear_display(self): - if self.draw is not None: - self.draw.rectangle((0, 0, self.width, self.height), outline=0, fill=0) - - def update_slot(self, index, text): - if index < len(self.slots): - self.slots[index] = text - - def clear_slot(self, index): - if index < len(self.slots): - self.slots[index] = self._EMPTY - - def update(self): - '''Display text''' - x = 0 - top = -2 - self.clear_display() - for i in range(self._SLOT_COUNT): - text = self.slots[i] - if len(text) > 0: - self.draw.text((x, top), text, font=self.font, fill=255) - top += 8 - - # Update - self.display.rotation = self.rotation - self.display.image(self.image) - self.display.show() - - -class OLEDPart(object): - ''' - The part that updates status on the oled display. 
- ''' - def __init__(self, rotation, resolution, auto_record_on_throttle=False): - self.oled = OLEDDisplay(rotation, resolution) - self.oled.init_display() - self.on = False - if auto_record_on_throttle: - self.recording = 'AUTO' - else: - self.recording = 'NO' - self.num_records = 0 - self.user_mode = None - eth0 = OLEDPart.get_ip_address('eth0') - wlan0 = OLEDPart.get_ip_address('wlan0') - if eth0 is not None: - self.eth0 = 'eth0:%s' % (eth0) - else: - self.eth0 = None - if wlan0 is not None: - self.wlan0 = 'wlan0:%s' % (wlan0) - else: - self.wlan0 = None - - def run(self): - if not self.on: - self.on = True - - def run_threaded(self, recording, num_records, user_mode): - if num_records is not None and num_records > 0: - self.num_records = num_records - - if recording: - self.recording = 'YES (Records = %s)' % (self.num_records) - else: - self.recording = 'NO (Records = %s)' % (self.num_records) - - self.user_mode = 'User Mode (%s)' % (user_mode) - - def update_slots(self): - updates = [self.eth0, self.wlan0, self.recording, self.user_mode] - index = 0 - # Update slots - for update in updates: - if update is not None: - self.oled.update_slot(index, update) - index += 1 - - # Update display - self.oled.update() - - def update(self): - self.on = True - # Run threaded loop by itself - while self.on: - self.update_slots() - - def shutdown(self): - self.oled.clear_display() - self.on = False - - # https://github.com/NVIDIA-AI-IOT/jetbot/blob/master/jetbot/utils/utils.py - - @classmethod - def get_ip_address(cls, interface): - if OLEDPart.get_network_interface_state(interface) == 'down': - return None - cmd = "ifconfig %s | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0.1'" % interface - return subprocess.check_output(cmd, shell=True).decode('ascii')[:-1] - - @classmethod - def get_network_interface_state(cls, interface): - return subprocess.check_output('cat /sys/class/net/%s/operstate' % interface, 
shell=True).decode('ascii')[:-1] +# requires the Adafruit ssd1306 library: pip install adafruit-circuitpython-ssd1306 + + +import os +import re +import subprocess +import time +from board import SCL, SDA +import busio +from PIL import Image, ImageDraw, ImageFont +import adafruit_ssd1306 + + +class OLEDDisplay(object): + ''' + Manages drawing of text on the OLED display. + ''' + def __init__(self, rotation=0, resolution=1): + # Placeholder + self._EMPTY = '' + # Total number of lines of text + self._SLOT_COUNT = 4 + self.slots = [self._EMPTY] * self._SLOT_COUNT + self.display = None + self.rotation = rotation + if resolution == 2: + self.height = 64 + else: + self.height = 32 + + def init_display(self): + ''' + Initializes the OLED display. + ''' + if self.display is None: + # Create the I2C interface. + i2c = busio.I2C(SCL, SDA) + # Create the SSD1306 OLED class. + # The first two parameters are the pixel width and pixel height. Change these + # to the right size for your display! + self.display = adafruit_ssd1306.SSD1306_I2C(128, self.height, i2c) + self.display.rotation = self.rotation + + self.display.fill(0) + self.display.show() + + # Create blank image for drawing. + # Make sure to create image with mode '1' for 1-bit color. + self.width = self.display.width + self.image = Image.new("1", (self.width, self.height)) + + # Get drawing object to draw on image. + self.draw = ImageDraw.Draw(self.image) + + # Draw a black filled box to clear the image. 
+ self.draw.rectangle((0, 0, self.width, self.height), outline=0, fill=0) + # Load Fonts + self.font = ImageFont.load_default() + self.clear_display() + + def clear_display(self): + if self.draw is not None: + self.draw.rectangle((0, 0, self.width, self.height), outline=0, fill=0) + + def update_slot(self, index, text): + if index < len(self.slots): + self.slots[index] = text + + def clear_slot(self, index): + if index < len(self.slots): + self.slots[index] = self._EMPTY + + def update(self): + '''Display text''' + x = 0 + top = -2 + self.clear_display() + for i in range(self._SLOT_COUNT): + text = self.slots[i] + if len(text) > 0: + self.draw.text((x, top), text, font=self.font, fill=255) + top += 8 + + # Update + self.display.rotation = self.rotation + self.display.image(self.image) + self.display.show() + + +class OLEDPart(object): + ''' + The part that updates status on the oled display. + ''' + def __init__(self, rotation, resolution, auto_record_on_throttle=False): + self.oled = OLEDDisplay(rotation, resolution) + self.oled.init_display() + self.on = False + if auto_record_on_throttle: + self.recording = 'AUTO' + else: + self.recording = 'NO' + self.num_records = 0 + self.user_mode = None + + # Bookworm / systemd often doesn't have "eth0" (predictable interface names). + # Only query interfaces that actually exist to avoid crashing. 
+ eth0 = None + wlan0 = None + + if os.path.exists('/sys/class/net/eth0'): + eth0 = OLEDPart.get_ip_address('eth0') + if os.path.exists('/sys/class/net/wlan0'): + wlan0 = OLEDPart.get_ip_address('wlan0') + + if eth0: + self.eth0 = f'eth0:{eth0}' + else: + self.eth0 = None + + if wlan0: + self.wlan0 = f'wlan0:{wlan0}' + else: + self.wlan0 = None + + def run(self): + if not self.on: + self.on = True + + def run_threaded(self, recording, num_records, user_mode): + if num_records is not None and num_records > 0: + self.num_records = num_records + + if recording: + self.recording = 'YES (Records = %s)' % (self.num_records) + else: + self.recording = 'NO (Records = %s)' % (self.num_records) + + self.user_mode = 'User Mode (%s)' % (user_mode) + + def update_slots(self): + updates = [self.eth0, self.wlan0, self.recording, self.user_mode] + index = 0 + # Update slots + for update in updates: + if update is not None: + self.oled.update_slot(index, update) + index += 1 + + # Update display + self.oled.update() + + def update(self): + self.on = True + # Run threaded loop by itself + while self.on: + self.update_slots() + + def shutdown(self): + self.oled.clear_display() + self.on = False + + @classmethod + def get_ip_address(cls, interface): + # If interface is missing or down, don't crash. + if cls.get_network_interface_state(interface) != 'up': + return None + + # Prefer `ip` (present on Bookworm Lite) over `ifconfig` (often not installed). + try: + out = subprocess.check_output( + ["ip", "-4", "addr", "show", "dev", interface], + stderr=subprocess.DEVNULL, + text=True, + ) + except Exception: + return None + + # Parse e.g. "inet 192.168.86.62/24 ..." + m = re.search(r"\binet\s+([0-9]+(?:\.[0-9]+){3})/", out) + if not m: + return None + ip = m.group(1) + if ip == "127.0.0.1": + return None + return ip + + @classmethod + def get_network_interface_state(cls, interface): + # Return 'down' for missing interfaces instead of throwing. 
+ path = f"/sys/class/net/{interface}/operstate" + try: + with open(path, "r") as f: + state = f.read().strip() + except FileNotFoundError: + return "down" + except Exception: + return "down" + + # Normalize common values: up/down/unknown/dormant... + return state if state else "down" diff --git a/donkeycar/parts/robohat.py b/donkeycar/parts/robohat.py index 768787a7f..935ae01a7 100755 --- a/donkeycar/parts/robohat.py +++ b/donkeycar/parts/robohat.py @@ -180,9 +180,13 @@ class RoboHATDriver: This is developed by Robotics Masters """ - def __init__(self, cfg, debug=False): + def __init__(self, cfg, serial_port=None, debug=False): # Initialise the Robo HAT using the serial port - self.pwm = serial.Serial(cfg.MM1_SERIAL_PORT, 115200, timeout=1) + # Use shared serial port if provided to avoid opening the same port twice + if serial_port is not None: + self.pwm = serial_port + else: + self.pwm = serial.Serial(cfg.MM1_SERIAL_PORT, 115200, timeout=1) self.MAX_FORWARD = cfg.MM1_MAX_FORWARD self.MAX_REVERSE = cfg.MM1_MAX_REVERSE self.STOPPED_PWM = cfg.MM1_STOPPED_PWM diff --git a/donkeycar/pipeline/augmentations.py b/donkeycar/pipeline/augmentations.py index a421d0e4c..151b24539 100644 --- a/donkeycar/pipeline/augmentations.py +++ b/donkeycar/pipeline/augmentations.py @@ -2,7 +2,8 @@ import logging import albumentations as A from albumentations import GaussianBlur -from albumentations.augmentations.transforms import RandomBrightnessContrast +from albumentations.augmentations import RandomBrightnessContrast + from donkeycar.config import Config @@ -11,14 +12,14 @@ class ImageAugmentation: - def __init__(self, cfg, key, prob=0.5, always_apply=False): + def __init__(self, cfg, key, prob=0.5): aug_list = getattr(cfg, key, []) - augmentations = [ImageAugmentation.create(a, cfg, prob, always_apply) + augmentations = [ImageAugmentation.create(a, cfg, prob) for a in aug_list] self.augmentations = A.Compose(augmentations) @classmethod - def create(cls, aug_type: str, config: Config, 
prob, always) -> \ + def create(cls, aug_type: str, config: Config, prob) -> \ albumentations.core.transforms_interface.BasicTransform: """ Augmentation factory. Cropping and trapezoidal mask are transformations which should be applied in training, validation @@ -30,13 +31,13 @@ def create(cls, aug_type: str, config: Config, prob, always) -> \ logger.info(f'Creating augmentation {aug_type} {b_limit}') return RandomBrightnessContrast(brightness_limit=b_limit, contrast_limit=b_limit, - p=prob, always_apply=always) + p=prob) elif aug_type == 'BLUR': b_range = getattr(config, 'AUG_BLUR_RANGE', 3) logger.info(f'Creating augmentation {aug_type} {b_range}') return GaussianBlur(sigma_limit=b_range, blur_limit=(13, 13), - p=prob, always_apply=always) + p=prob) # Parts interface def run(self, img_arr): diff --git a/donkeycar/templates/cfg_basic.py b/donkeycar/templates/cfg_basic.py index f718e8ae4..eee8bf1ac 100755 --- a/donkeycar/templates/cfg_basic.py +++ b/donkeycar/templates/cfg_basic.py @@ -25,7 +25,7 @@ MAX_LOOPS = None #CAMERA -CAMERA_TYPE = "PICAM" # (PICAM|WEBCAM|CVCAM|CSIC|V4L|D435|MOCK|IMAGE_LIST) +CAMERA_TYPE = "PICAM" # (PICAM|WEBCAM|CVCAM|CSIC|V4L|D435|OAKD|MOCK|IMAGE_LIST) IMAGE_W = 160 IMAGE_H = 120 IMAGE_DEPTH = 3 # default RGB=3, make 1 for mono diff --git a/donkeycar/templates/cfg_complete.py b/donkeycar/templates/cfg_complete.py index d468f6b3c..6ef50ac35 100644 --- a/donkeycar/templates/cfg_complete.py +++ b/donkeycar/templates/cfg_complete.py @@ -9,48 +9,127 @@ import dk cfg = dk.load_config(config_path='~/mycar/config.py') print(cfg.CAMERA_RESOLUTION) - """ - import os -#PATHS +# ============================================================================== +# 1. 
HARDWARE CONFIGURATION & I/O +# (Camera, Drive Train, Sensors, Pins, Display) +# ============================================================================== + +# PATHS CAR_PATH = PACKAGE_PATH = os.path.dirname(os.path.realpath(__file__)) DATA_PATH = os.path.join(CAR_PATH, 'data') MODELS_PATH = os.path.join(CAR_PATH, 'models') -#VEHICLE -DRIVE_LOOP_HZ = 20 # the vehicle loop will pause if faster than this speed. -MAX_LOOPS = None # the vehicle loop can abort after this many iterations, when given a positive integer. - -#CAMERA -CAMERA_TYPE = "PICAM" # (PICAM|WEBCAM|CVCAM|CSIC|V4L|D435|MOCK|IMAGE_LIST) +# ------------------------------------------------------------------------------ +# CAMERA SETUP +# ------------------------------------------------------------------------------ + +# Select the camera type. +# 'PICAM': Raspberry Pi Camera (CSI) +# 'WEBCAM': USB Camera +# 'CVCAM': OpenCV Camera (often same as WEBCAM) +# 'CSIC': High-speed CSI camera (e.g. Arducam) +# 'D435': Intel Realsense D435 +# 'OAKD': Luxonis OAK-D +# 'MOCK': Simulation/Testing or when using GPS path following +CAMERA_TYPE = "PICAM" + +# The resolution of the input image. Higher resolution needs more processing power. IMAGE_W = 160 IMAGE_H = 120 -IMAGE_DEPTH = 3 # default RGB=3, make 1 for mono -CAMERA_FRAMERATE = DRIVE_LOOP_HZ + +# The depth of the image. 3 for RGB, 1 for Greyscale. +IMAGE_DEPTH = 3 + +# The framerate of the camera. Should generally match DRIVE_LOOP_HZ. +CAMERA_FRAMERATE = 20 + +# Flip the image vertically (useful if camera is mounted upside down). CAMERA_VFLIP = False + +# Flip the image horizontally. 
CAMERA_HFLIP = False -CAMERA_INDEX = 0 # used for 'WEBCAM' and 'CVCAM' when there is more than one camera connected -# For CSIC camera - If the camera is mounted in a rotated position, changing the below parameter will correct the output frame orientation -CSIC_CAM_GSTREAMER_FLIP_PARM = 0 # (0 => none , 4 => Flip horizontally, 6 => Flip vertically) -BGR2RGB = False # true to convert from BRG format to RGB format; requires opencv -SHOW_PILOT_IMAGE = False # show the image used to do the inference when in autopilot mode -# For IMAGE_LIST camera -# PATH_MASK = "~/mycar/data/tub_1_20-03-12/*.jpg" +# Used for 'WEBCAM' and 'CVCAM' when there is more than one camera connected. +CAMERA_INDEX = 0 -#9865, over rides only if needed, ie. TX2.. -PCA9685_I2C_ADDR = 0x40 #I2C address, use i2cdetect to validate this number -PCA9685_I2C_BUSNUM = None #None will auto detect, which is fine on the pi. But other platforms should specify the bus num. +# CSIC Camera: 0=None, 4=Flip Horizontal, 6=Flip Vertical. +CSIC_CAM_GSTREAMER_FLIP_PARM = 0 -#SSD1306_128_32 -USE_SSD1306_128_32 = False # Enable the SSD_1306 OLED Display -SSD1306_128_32_I2C_ROTATION = 0 # 0 = text is right-side up, 1 = rotated 90 degrees clockwise, 2 = 180 degrees (flipped), 3 = 270 degrees -SSD1306_RESOLUTION = 1 # 1 = 128x32; 2 = 128x64 +# Convert Blue-Green-Red (OpenCV default) to Red-Green-Blue. 
+BGR2RGB = False + +# Intel Realsense D435 specific settings +REALSENSE_D435_RGB = True # True to capture RGB image +REALSENSE_D435_DEPTH = True # True to capture depth as image array +REALSENSE_D435_IMU = False # True to capture IMU data (D435i only) +REALSENSE_D435_ID = None # Serial number of camera or None for auto-detect + +# OAK-D Camera specific settings +OAKD_RGB = True # True to capture RGB image +OAKD_DEPTH = True # True to capture depth as image array +OAKD_ID = None # Serial number of camera or None for auto-detect + + +# ------------------------------------------------------------------------------ +# I2C & DISPLAY +# ------------------------------------------------------------------------------ + +# I2C address of the PCA9685 servo driver (standard is 0x40). +PCA9685_I2C_ADDR = 0x40 + +# I2C bus number. None will auto-detect (usually 1 on Pi). +PCA9685_I2C_BUSNUM = None + +# Enable the SSD1306 OLED display (small screen on the car). +USE_SSD1306_128_32 = False + +# OLED Rotation: 0 = normal, 1 = 90 deg, 2 = 180 deg, 3 = 270 deg. +SSD1306_128_32_I2C_ROTATION = 0 + +# OLED Resolution: 1 = 128x32, 2 = 128x64. +SSD1306_RESOLUTION = 1 + + +# ------------------------------------------------------------------------------ +# INPUT DEVICES (JOYSTICK / CONTROLLER) +# ------------------------------------------------------------------------------ + +# If True, the joystick is enabled by default without needing '--js' flag. +USE_JOYSTICK_AS_DEFAULT = True + +# The maximum throttle output (0.0 to 1.0) allowed by the joystick. +# Useful for limiting speed for beginners. +JOYSTICK_MAX_THROTTLE = 0.5 + +# Scalar for steering. 1.0 is normal. <1.0 is less sensitive. +JOYSTICK_STEERING_SCALE = 1.0 + +# The "deadzone" where small joystick movements are ignored (0.0 to 1.0). +JOYSTICK_DEADZONE = 0.01 + +# Set to -1.0 to flip forward/backward direction on the joystick. +JOYSTICK_THROTTLE_DIR = -1.0 + +# The linux device file for the joystick. 
+JOYSTICK_DEVICE_FILE = "/dev/input/js0" + +# The type of controller being used. +# Options: 'ps3', 'ps4', 'xbox', 'nimbus', 'wiiu', 'F710', 'rc3', 'MM1 (use for RC Hat)', 'custom' +CONTROLLER_TYPE = 'xbox' + +# Enable listening for remote joystick control over the network. +USE_NETWORKED_JS = False +NETWORK_JS_SERVER_IP = None + + +# ------------------------------------------------------------------------------ +# DRIVE TRAIN CONFIGURATION +# ------------------------------------------------------------------------------ -# # DRIVE_TRAIN_TYPE # These options specify which chasis and motor setup you are using. # See Actuators documentation https://docs.donkeycar.com/parts/actuators/ @@ -66,55 +145,37 @@ # "DC_TWO_WHEEL_L298N" using HBridge in 3-pin mode to control two drive motors, one of the left and one on the right. # "MOCK" no drive train. This can be used to test other features in a test rig. # "VESC" VESC Motor controller to set servo angle and duty cycle -# (deprecated) "SERVO_HBRIDGE_PWM" use ServoBlaster to output pwm control from the PiZero directly to control steering, -# and HBridge for a drive motor. -# (deprecated) "PIGPIO_PWM" uses Raspberrys internal PWM -# (deprecated) "I2C_SERVO" uses PCA9685 servo controller to control a steering servo and an ESC, as in a standard RC car -# + +# Select the drive train type. This determines how the software talks to the motors. 
+# "PWM_STEERING_THROTTLE": Standard RC car (Servo + ESC) +# "MM1": RoboHat MM1 or RC Hat +# "SERVO_HBRIDGE_2PIN": Servo for steering, HBridge (2 pin) for motor +# "SERVO_HBRIDGE_3PIN": Servo for steering, HBridge (3 pin) for motor +# "DC_STEER_THROTTLE": DC Motor for steering, DC Motor for drive (L298N) +# "DC_TWO_WHEEL": Differential drive (tank style), 2 Pin HBridge +# "DC_TWO_WHEEL_L298N": Differential drive (tank style), 3 Pin HBridge +# "VESC": VESC Motor Controller DRIVE_TRAIN_TYPE = "PWM_STEERING_THROTTLE" -# -# PWM_STEERING_THROTTLE -# +# Configuration for PWM_STEERING_THROTTLE (Standard RC Car) # Drive train for RC car with a steering servo and ESC. # Uses a PwmPin for steering (servo) and a second PwmPin for throttle (ESC) # Base PWM Frequence is presumed to be 60hz; use PWM_xxxx_SCALE to adjust pulse with for non-standard PWM frequencies -# +# Requires calibration using 'donkey calibrate'. PWM_STEERING_THROTTLE = { - "PWM_STEERING_PIN": "PCA9685.1:40.1", # PWM output pin for steering servo - "PWM_STEERING_SCALE": 1.0, # used to compensate for PWM frequency differents from 60hz; NOT for adjusting steering range - "PWM_STEERING_INVERTED": False, # True if hardware requires an inverted PWM pulse - "PWM_THROTTLE_PIN": "PCA9685.1:40.0", # PWM output pin for ESC - "PWM_THROTTLE_SCALE": 1.0, # used to compensate for PWM frequence differences from 60hz; NOT for increasing/limiting speed - "PWM_THROTTLE_INVERTED": False, # True if hardware requires an inverted PWM pulse - "STEERING_LEFT_PWM": 460, #pwm value for full left steering - "STEERING_RIGHT_PWM": 290, #pwm value for full right steering - "THROTTLE_FORWARD_PWM": 500, #pwm value for max forward throttle - "THROTTLE_STOPPED_PWM": 370, #pwm value for no movement - "THROTTLE_REVERSE_PWM": 220, #pwm value for max reverse throttle + "PWM_STEERING_PIN": "PCA9685.1:40.1", # Pin for steering servo + "PWM_STEERING_SCALE": 1.0, # PWM frequency compensation + "PWM_STEERING_INVERTED": False, # Invert steering 
direction + "PWM_THROTTLE_PIN": "PCA9685.1:40.0", # Pin for ESC (Throttle) + "PWM_THROTTLE_SCALE": 1.0, # PWM frequency compensation + "PWM_THROTTLE_INVERTED": False, # Invert throttle direction + "STEERING_LEFT_PWM": 460, # Calibrated value: Full Left + "STEERING_RIGHT_PWM": 290, # Calibrated value: Full Right + "THROTTLE_FORWARD_PWM": 500, # Calibrated value: Max Forward + "THROTTLE_STOPPED_PWM": 370, # Calibrated value: Stopped + "THROTTLE_REVERSE_PWM": 220, # Calibrated value: Max Reverse } -# -# I2C_SERVO (deprecated in favor of PWM_STEERING_THROTTLE) -# -STEERING_CHANNEL = 1 #(deprecated) channel on the 9685 pwm board 0-15 -STEERING_LEFT_PWM = 460 #pwm value for full left steering -STEERING_RIGHT_PWM = 290 #pwm value for full right steering -THROTTLE_CHANNEL = 0 #(deprecated) channel on the 9685 pwm board 0-15 -THROTTLE_FORWARD_PWM = 500 #pwm value for max forward throttle -THROTTLE_STOPPED_PWM = 370 #pwm value for no movement -THROTTLE_REVERSE_PWM = 220 #pwm value for max reverse throttle - -# -# PIGPIO_PWM (deprecated in favor of PWM_STEERING_THROTTLE) -# -STEERING_PWM_PIN = 13 #(deprecated) Pin numbering according to Broadcom numbers -STEERING_PWM_FREQ = 50 #Frequency for PWM -STEERING_PWM_INVERTED = False #If PWM needs to be inverted -THROTTLE_PWM_PIN = 18 #(deprecated) Pin numbering according to Broadcom numbers -THROTTLE_PWM_FREQ = 50 #Frequency for PWM -THROTTLE_PWM_INVERTED = False #If PWM needs to be inverted - # # SERVO_HBRIDGE_2PIN # - configures a steering servo and an HBridge in 2pin mode (2 pwm pins) @@ -145,14 +206,15 @@ # - RPI_GPIO, PIGPIO and PCA9685 can be mixed arbitrarily, # although it is discouraged to mix RPI_GPIO and PIGPIO. 
# +# Configuration for SERVO_HBRIDGE_2PIN SERVO_HBRIDGE_2PIN = { - "FWD_DUTY_PIN": "RPI_GPIO.BOARD.18", # provides forward duty cycle to motor - "BWD_DUTY_PIN": "RPI_GPIO.BOARD.16", # provides reverse duty cycle to motor - "PWM_STEERING_PIN": "RPI_GPIO.BOARD.33", # provides servo pulse to steering servo - "PWM_STEERING_SCALE": 1.0, # used to compensate for PWM frequency differents from 60hz; NOT for adjusting steering range - "PWM_STEERING_INVERTED": False, # True if hardware requires an inverted PWM pulse - "STEERING_LEFT_PWM": 460, # pwm value for full left steering (use `donkey calibrate` to measure value for your car) - "STEERING_RIGHT_PWM": 290, # pwm value for full right steering (use `donkey calibrate` to measure value for your car) + "FWD_DUTY_PIN": "RPI_GPIO.BOARD.18", # Pin for Forward + "BWD_DUTY_PIN": "RPI_GPIO.BOARD.16", # Pin for Reverse + "PWM_STEERING_PIN": "RPI_GPIO.BOARD.33", # Pin for Servo + "PWM_STEERING_SCALE": 1.0, + "PWM_STEERING_INVERTED": False, + "STEERING_LEFT_PWM": 460, + "STEERING_RIGHT_PWM": 290, } # @@ -189,51 +251,18 @@ # - RPI_GPIO, PIGPIO and PCA9685 can be mixed arbitrarily, # although it is discouraged to mix RPI_GPIO and PIGPIO. 
# +# Configuration for SERVO_HBRIDGE_3PIN SERVO_HBRIDGE_3PIN = { - "FWD_PIN": "RPI_GPIO.BOARD.18", # ttl pin, high enables motor forward - "BWD_PIN": "RPI_GPIO.BOARD.16", # ttl pin, high enables motor reverse - "DUTY_PIN": "RPI_GPIO.BOARD.35", # provides duty cycle to motor - "PWM_STEERING_PIN": "RPI_GPIO.BOARD.33", # provides servo pulse to steering servo - "PWM_STEERING_SCALE": 1.0, # used to compensate for PWM frequency differents from 60hz; NOT for adjusting steering range - "PWM_STEERING_INVERTED": False, # True if hardware requires an inverted PWM pulse - "STEERING_LEFT_PWM": 460, # pwm value for full left steering (use `donkey calibrate` to measure value for your car) - "STEERING_RIGHT_PWM": 290, # pwm value for full right steering (use `donkey calibrate` to measure value for your car) + "FWD_PIN": "RPI_GPIO.BOARD.18", # Enable Forward + "BWD_PIN": "RPI_GPIO.BOARD.16", # Enable Reverse + "DUTY_PIN": "RPI_GPIO.BOARD.35", # Speed Control (PWM) + "PWM_STEERING_PIN": "RPI_GPIO.BOARD.33", + "PWM_STEERING_SCALE": 1.0, + "PWM_STEERING_INVERTED": False, + "STEERING_LEFT_PWM": 460, + "STEERING_RIGHT_PWM": 290, } -# -# DRIVETRAIN_TYPE == "SERVO_HBRIDGE_PWM" (deprecated in favor of SERVO_HBRIDGE_2PIN) -# - configures a steering servo and an HBridge in 2pin mode (2 pwm pins) -# - Uses ServoBlaster library, which is NOT installed by default, so -# you will need to install it to make this work. -# - Servo takes a standard servo PWM pulse between 1 millisecond (fully reverse) -# and 2 milliseconds (full forward) with 1.5ms being neutral. -# - the motor is controlled by two pwm pins, -# one for forward and one for backward (reverse). -# - the pwm pins produce a duty cycle from 0 (completely LOW) -# to 1 (100% completely high), which is proportional to the -# amount of power delivered to the motor. -# - in forward mode, the reverse pwm is 0 duty_cycle, -# in backward mode, the forward pwm is 0 duty cycle. 
-# - both pwms are 0 duty cycle (LOW) to 'detach' motor and -# and glide to a stop. -# - both pwms are full duty cycle (100% HIGH) to brake -# -HBRIDGE_PIN_FWD = 18 # provides forward duty cycle to motor -HBRIDGE_PIN_BWD = 16 # provides reverse duty cycle to motor -STEERING_CHANNEL = 0 # PCA 9685 channel for steering control -STEERING_LEFT_PWM = 460 # pwm value for full left steering (use `donkey calibrate` to measure value for your car) -STEERING_RIGHT_PWM = 290 # pwm value for full right steering (use `donkey calibrate` to measure value for your car) - -#VESC controller, primarily need to change VESC_SERIAL_PORT and VESC_MAX_SPEED_PERCENT -VESC_MAX_SPEED_PERCENT =.2 # Max speed as a percent of the actual speed -VESC_SERIAL_PORT= "/dev/ttyACM0" # Serial device to use for communication. Can check with ls /dev/tty* -VESC_HAS_SENSOR= True # Whether or not the bldc motor is using a hall effect sensor -VESC_START_HEARTBEAT= True # Whether or not to automatically start the heartbeat thread that will keep commands alive. -VESC_BAUDRATE= 115200 # baudrate for the serial communication. Shouldn't need to change this. -VESC_TIMEOUT= 0.05 # timeout for the serial communication -VESC_STEERING_SCALE= 0.5 # VESC accepts steering inputs from 0 to 1. Joystick is usually -1 to 1. This changes it to -0.5 to 0.5 -VESC_STEERING_OFFSET = 0.5 # VESC accepts steering inputs from 0 to 1. Coupled with above change we move Joystick to 0 to 1 - # # DC_STEER_THROTTLE with one motor as steering, one as drive # - uses L298N type motor controller in two pin wiring @@ -254,14 +283,14 @@ # - RPI_GPIO, PIGPIO and PCA9685 can be mixed arbitrarily, # although it is discouraged to mix RPI_GPIO and PIGPIO. 
# +# Configuration for DC_STEER_THROTTLE (Motor for steering, Motor for drive) DC_STEER_THROTTLE = { - "LEFT_DUTY_PIN": "RPI_GPIO.BOARD.18", # pwm pin produces duty cycle for steering left - "RIGHT_DUTY_PIN": "RPI_GPIO.BOARD.16", # pwm pin produces duty cycle for steering right - "FWD_DUTY_PIN": "RPI_GPIO.BOARD.15", # pwm pin produces duty cycle for forward drive - "BWD_DUTY_PIN": "RPI_GPIO.BOARD.13", # pwm pin produces duty cycle for reverse drive + "LEFT_DUTY_PIN": "RPI_GPIO.BOARD.18", # Steer Left + "RIGHT_DUTY_PIN": "RPI_GPIO.BOARD.16", # Steer Right + "FWD_DUTY_PIN": "RPI_GPIO.BOARD.15", # Drive Forward + "BWD_DUTY_PIN": "RPI_GPIO.BOARD.13", # Drive Reverse } -# # DC_TWO_WHEEL pin configuration # - configures L298N_HBridge_2pin driver # - two wheels as differential drive, left and right. @@ -290,14 +319,14 @@ # - RPI_GPIO, PIGPIO and PCA9685 can be mixed arbitrarily, # although it is discouraged to mix RPI_GPIO and PIGPIO. # +# Configuration for DC_TWO_WHEEL (Differential/Tank Drive) DC_TWO_WHEEL = { - "LEFT_FWD_DUTY_PIN": "RPI_GPIO.BOARD.18", # pwm pin produces duty cycle for left wheel forward - "LEFT_BWD_DUTY_PIN": "RPI_GPIO.BOARD.16", # pwm pin produces duty cycle for left wheel reverse - "RIGHT_FWD_DUTY_PIN": "RPI_GPIO.BOARD.15", # pwm pin produces duty cycle for right wheel forward - "RIGHT_BWD_DUTY_PIN": "RPI_GPIO.BOARD.13", # pwm pin produces duty cycle for right wheel reverse + "LEFT_FWD_DUTY_PIN": "RPI_GPIO.BOARD.18", + "LEFT_BWD_DUTY_PIN": "RPI_GPIO.BOARD.16", + "RIGHT_FWD_DUTY_PIN": "RPI_GPIO.BOARD.15", + "RIGHT_BWD_DUTY_PIN": "RPI_GPIO.BOARD.13", } -# # DC_TWO_WHEEL_L298N pin configuration # - configures L298N_HBridge_3pin driver # - two wheels as differential drive, left and right. @@ -329,37 +358,91 @@ # - for example "PCA9685.1:40.13" # - RPI_GPIO, PIGPIO and PCA9685 can be mixed arbitrarily, # although it is discouraged to mix RPI_GPIO and PIGPIO. 
-# +# Configuration for DC_TWO_WHEEL_L298N (Differential Drive 3-pin) DC_TWO_WHEEL_L298N = { - "LEFT_FWD_PIN": "RPI_GPIO.BOARD.16", # TTL output pin enables left wheel forward - "LEFT_BWD_PIN": "RPI_GPIO.BOARD.18", # TTL output pin enables left wheel reverse - "LEFT_EN_DUTY_PIN": "RPI_GPIO.BOARD.22", # PWM pin generates duty cycle for left motor speed - - "RIGHT_FWD_PIN": "RPI_GPIO.BOARD.15", # TTL output pin enables right wheel forward - "RIGHT_BWD_PIN": "RPI_GPIO.BOARD.13", # TTL output pin enables right wheel reverse - "RIGHT_EN_DUTY_PIN": "RPI_GPIO.BOARD.11", # PWM pin generates duty cycle for right wheel speed + "LEFT_FWD_PIN": "RPI_GPIO.BOARD.16", + "LEFT_BWD_PIN": "RPI_GPIO.BOARD.18", + "LEFT_EN_DUTY_PIN": "RPI_GPIO.BOARD.22", + "RIGHT_FWD_PIN": "RPI_GPIO.BOARD.15", + "RIGHT_BWD_PIN": "RPI_GPIO.BOARD.13", + "RIGHT_EN_DUTY_PIN": "RPI_GPIO.BOARD.11", } -#ODOMETRY -HAVE_ODOM = False # Do you have an odometer/encoder -ENCODER_TYPE = 'GPIO' # What kind of encoder? GPIO|Arduino|Astar -MM_PER_TICK = 12.7625 # How much travel with a single tick, in mm. 
Roll you car a meter and divide total ticks measured by 1,000 -ODOM_PIN = 13 # if using GPIO, which GPIO board mode pin to use as input -ODOM_DEBUG = False # Write out values on vel and distance as it runs +# Configuration for VESC Motor Controller +VESC_MAX_SPEED_PERCENT = .2 +VESC_SERIAL_PORT = "/dev/ttyACM0" +VESC_HAS_SENSOR = True +VESC_START_HEARTBEAT = True +VESC_BAUDRATE = 115200 +VESC_TIMEOUT = 0.05 +VESC_STEERING_SCALE = 0.5 +VESC_STEERING_OFFSET = 0.5 + +# Configuration for RoboHat MM1 and RC Hat +MM1_STEERING_MID = 1500 +MM1_MAX_FORWARD = 2000 +MM1_STOPPED_PWM = 1500 +MM1_MAX_REVERSE = 1000 +MM1_SHOW_STEERING_VALUE = False +MM1_SERIAL_PORT = '/dev/ttyAMA0' + + +# ------------------------------------------------------------------------------ +# SENSORS & ADD-ONS +# ------------------------------------------------------------------------------ + +# ODOMETRY: Set to True if you have an encoder/odometer installed. +HAVE_ODOM = False +ENCODER_TYPE = 'GPIO' # GPIO|Arduino|Astar +MM_PER_TICK = 12.7625 # Calibration: MM travel per encoder tick +ODOM_PIN = 13 # GPIO pin for encoder +ODOM_DEBUG = False -# #LIDAR +# LIDAR: Set to True if you have a LIDAR (RP or YD). USE_LIDAR = False -LIDAR_TYPE = 'RP' #(RP|YD) -LIDAR_LOWER_LIMIT = 90 # angles that will be recorded. Use this to block out obstructed areas on your car, or looking backwards. Note that for the RP A1M8 Lidar, "0" is in the direction of the motor +LIDAR_TYPE = 'RP' # (RP|YD) +LIDAR_LOWER_LIMIT = 90 # Angle limit to ignore (e.g. looking back at car) LIDAR_UPPER_LIMIT = 270 -# TFMINI +# TFMINI: Short range laser radar. HAVE_TFMINI = False -TFMINI_SERIAL_PORT = "/dev/serial0" # tfmini serial port, can be wired up or use usb/serial adapter +TFMINI_SERIAL_PORT = "/dev/serial0" + +# IMU: Inertial Measurement Unit (e.g. MPU6050). 
+HAVE_IMU = False +IMU_SENSOR = 'mpu6050' # (mpu6050|mpu9250) +IMU_ADDRESS = 0x68 # I2C address +IMU_DLP_CONFIG = 0 # Digital Lowpass Filter (0-6) + +# SOMBRERO HAT: Enable if using the Sombrero Hat. +HAVE_SOMBRERO = False + +# LEDS: RGB Status LED configuration. +HAVE_RGB_LED = False +LED_INVERT = False # True for Common Anode +LED_PIN_R = 12 +LED_PIN_G = 10 +LED_PIN_B = 16 +LED_R = 0 +LED_G = 0 +LED_B = 1 + +# Hardware Alert Logic (Blink LED when recording count reached) +REC_COUNT_ALERT = 1000 +REC_COUNT_ALERT_CYC = 15 +REC_COUNT_ALERT_BLINK_RATE = 0.4 +RECORD_ALERT_COLOR_ARR = [ (0, (1, 1, 1)), (3000, (5, 5, 5)), (5000, (5, 2, 0)), (10000, (0, 5, 0)), (15000, (0, 5, 5)), (20000, (0, 0, 5)) ] +MODEL_RELOADED_LED_R = 100 +MODEL_RELOADED_LED_G = 0 +MODEL_RELOADED_LED_B = 0 -#TRAINING -# The default AI framework to use. Choose from (tensorflow|pytorch) -DEFAULT_AI_FRAMEWORK = 'tensorflow' + +# ============================================================================== +# 2. AI, MODELS & TRAINING +# (Frameworks, Hyperparams, Transformations, Augmentations) +# ============================================================================== + +# TRAINING FUNDAMENTALS # The DEFAULT_MODEL_TYPE will choose which model will be created at training # time. This chooses between different neural network designs. You can @@ -367,32 +450,91 @@ # python manage.py train and drive commands. # tensorflow models: (linear|categorical|tflite_linear|tensorrt_linear) # pytorch models: (resnet18) +# The AI framework to use (tensorflow|pytorch). +DEFAULT_AI_FRAMEWORK = 'tensorflow' + +# The architecture of the model to use. +# 'linear': Standard regression (predicts steer/throttle floats) +# 'categorical': Classification (bins steer/throttle into categories) +# 'resnet18': Pytorch heavy model DEFAULT_MODEL_TYPE = 'linear' -BATCH_SIZE = 128 #how many records to use when doing one pass of gradient decent. Use a smaller number if your gpu is running out of memory. 
-TRAIN_TEST_SPLIT = 0.8 #what percent of records to use for training. the remaining used for validation. -MAX_EPOCHS = 100 #how many times to visit all records of your data -SHOW_PLOT = True #would you like to see a pop up display of final loss? -VERBOSE_TRAIN = True #would you like to see a progress bar with text during training? -USE_EARLY_STOP = True #would you like to stop the training if we see it's not improving fit? -EARLY_STOP_PATIENCE = 5 #how many epochs to wait before no improvement -MIN_DELTA = .0005 #early stop will want this much loss change before calling it improved. -PRINT_MODEL_SUMMARY = True #print layers and weights to stdout -OPTIMIZER = None #adam, sgd, rmsprop, etc.. None accepts default -LEARNING_RATE = 0.001 #only used when OPTIMIZER specified -LEARNING_RATE_DECAY = 0.0 #only used when OPTIMIZER specified -SEND_BEST_MODEL_TO_PI = False #change to true to automatically send best model during training -CREATE_TF_LITE = True # automatically create tflite model in training -CREATE_TENSOR_RT = False # automatically create tensorrt model in training -SAVE_MODEL_AS_H5 = False # if old keras format should be used instead of savedmodel -CACHE_POLICY = 'ARRAY' # if images are cached as array in training other options are 'NOCACHE' and 'BINARY' - -PRUNE_CNN = False #This will remove weights from your model. The primary goal is to increase performance. -PRUNE_PERCENT_TARGET = 75 # The desired percentage of pruning. -PRUNE_PERCENT_PER_ITERATION = 20 # Percenge of pruning that is perform per iteration. -PRUNE_VAL_LOSS_DEGRADATION_LIMIT = 0.2 # The max amout of validation loss that is permitted during pruning. -PRUNE_EVAL_PERCENT_OF_DATASET = .05 # percent of dataset used to perform evaluation of model. -# +# Number of training samples per pass. +BATCH_SIZE = 128 + +# Percentage of data used for training vs validation (0.8 = 80% train). +TRAIN_TEST_SPLIT = 0.8 + +# Max training iterations. +MAX_EPOCHS = 100 + +# Show a plot of loss after training. 
+SHOW_PLOT = True + +# Show text progress bar during training. +VERBOSE_TRAIN = True + +# Stop training early if loss stops improving. +USE_EARLY_STOP = True +EARLY_STOP_PATIENCE = 5 +MIN_DELTA = .0005 + +# Print model summary to console. +PRINT_MODEL_SUMMARY = True + +# Optimizer (None uses default for framework). +OPTIMIZER = None +LEARNING_RATE = 0.001 +LEARNING_RATE_DECAY = 0.0 + +# Store images as 'ARRAY' (faster), 'BINARY', or 'NOCACHE' (saves RAM). +CACHE_POLICY = 'ARRAY' + +# MODEL OPTIMIZATION +# Automatically create TFLite model for faster inference on Pi. +CREATE_TF_LITE = True +CREATE_TENSOR_RT = False +SAVE_MODEL_AS_H5 = False +SEND_BEST_MODEL_TO_PI = False + +# Model Pruning (Remove weights to increase speed). +PRUNE_CNN = False +PRUNE_PERCENT_TARGET = 75 +PRUNE_PERCENT_PER_ITERATION = 20 +PRUNE_VAL_LOSS_DEGRADATION_LIMIT = 0.2 +PRUNE_EVAL_PERCENT_OF_DATASET = .05 + +# MODEL SPECIFIC SETTINGS +#Limits the upper bound of the learned throttle for categorical models. +#For the categorical model, this limits the upper bound of the learned throttle +#it's very IMPORTANT that this value is matched from the training PC config.py and the robot.py +#and ideally wouldn't change once set. +MODEL_CATEGORICAL_MAX_THROTTLE_RANGE = 0.8 + +# Number of images in a sequence for RNN/3D models. +SEQUENCE_LENGTH = 3 + +# Transfer Learning options. +FREEZE_LAYERS = False +NUM_LAST_LAYERS_TO_TRAIN = 7 + + +# ------------------------------------------------------------------------------ +# AUGMENTATIONS (Applied randomly ONLY during training) +# ------------------------------------------------------------------------------ +# List of augmentations to apply. e.g. ['MULTIPLY', 'BLUR'] +AUGMENTATIONS = [] + +# Brightness range for augmentation [-0.2, 0.2]. +AUG_BRIGHTNESS_RANGE = 0.2 + +# Blur range for augmentation (kernel size). 
+AUG_BLUR_RANGE = (0, 3) + + +# ------------------------------------------------------------------------------ +# TRANSFORMATIONS (Applied during Training AND Inference) +# ------------------------------------------------------------------------------ # Augmentations and Transformations # # - Augmentations are changes to the image that are only applied during @@ -482,17 +624,10 @@ # return self.blur.run(image) # ``` # -AUGMENTATIONS = [] # changes to image only applied in training to create - # more variety in the data. -TRANSFORMATIONS = [] # changes applied _before_ training augmentations, - # such that augmentations are applied to the transformed image, -POST_TRANSFORMATIONS = [] # transformations applied _after_ training augmentations, - # such that changes are applied to the augmented image - -# Settings for brightness and blur, use 'MULTIPLY' and/or 'BLUR' in -# AUGMENTATIONS -AUG_BRIGHTNESS_RANGE = 0.2 # this is interpreted as [-0.2, 0.2] -AUG_BLUR_RANGE = (0, 3) + +# Operations applied to the image before it hits the AI. 
+TRANSFORMATIONS = [] +POST_TRANSFORMATIONS = [] # "CROP" Transformation # Apply mask to borders of the image @@ -508,10 +643,10 @@ # xxxxxxxxxxxxxxxxxxxxx # bottom # xxxxxxxxxxxxxxxxxxxxx # # # # # # # # # # # # # # -ROI_CROP_TOP = 45 # the number of rows of pixels to ignore on the top of the image -ROI_CROP_BOTTOM = 0 # the number of rows of pixels to ignore on the bottom of the image -ROI_CROP_RIGHT = 0 # the number of rows of pixels to ignore on the right of the image -ROI_CROP_LEFT = 0 # the number of rows of pixels to ignore on the left of the image +ROI_CROP_TOP = 45 +ROI_CROP_BOTTOM = 0 +ROI_CROP_RIGHT = 0 +ROI_CROP_LEFT = 0 # "TRAPEZE" tranformation # Apply mask to borders of image @@ -531,161 +666,56 @@ ROI_TRAPEZE_MIN_Y = 60 ROI_TRAPEZE_MAX_Y = 120 -# "CANNY" Canny Edge Detection tranformation -CANNY_LOW_THRESHOLD = 60 # Canny edge detection low threshold value of intensity gradient -CANNY_HIGH_THRESHOLD = 110 # Canny edge detection high threshold value of intensity gradient -CANNY_APERTURE = 3 # Canny edge detect aperture in pixels, must be odd; choices=[3, 5, 7] - -# "BLUR" transformation (not this is SEPARATE from the blur augmentation) -BLUR_KERNEL = 5 # blur kernel horizontal size in pixels -BLUR_KERNEL_Y = None # blur kernel vertical size in pixels or None for square kernel -BLUR_GAUSSIAN = True # blur is gaussian if True, simple if False - -# "RESIZE" transformation -RESIZE_WIDTH = 160 # horizontal size in pixels -RESIZE_HEIGHT = 120 # vertical size in pixels - -# "SCALE" transformation -SCALE_WIDTH = 1.0 # horizontal scale factor -SCALE_HEIGHT = None # vertical scale factor or None to maintain aspect ratio - -#Model transfer options -#When copying weights during a model transfer operation, should we freeze a certain number of layers -#to the incoming weights and not allow them to change during training? 
-FREEZE_LAYERS = False #default False will allow all layers to be modified by training -NUM_LAST_LAYERS_TO_TRAIN = 7 #when freezing layers, how many layers from the last should be allowed to train? - -#WEB CONTROL -WEB_CONTROL_PORT = int(os.getenv("WEB_CONTROL_PORT", 8887)) # which port to listen on when making a web controller -WEB_INIT_MODE = "user" # which control mode to start in. one of user|local_angle|local. Setting local will start in ai mode. - -#JOYSTICK -USE_JOYSTICK_AS_DEFAULT = False #when starting the manage.py, when True, will not require a --js option to use the joystick -JOYSTICK_MAX_THROTTLE = 0.5 #this scalar is multiplied with the -1 to 1 throttle value to limit the maximum throttle. This can help if you drop the controller or just don't need the full speed available. -JOYSTICK_STEERING_SCALE = 1.0 #some people want a steering that is less sensitve. This scalar is multiplied with the steering -1 to 1. It can be negative to reverse dir. -AUTO_RECORD_ON_THROTTLE = True #if true, we will record whenever throttle is not zero. if false, you must manually toggle recording with some other trigger. Usually circle button on joystick. -CONTROLLER_TYPE = 'xbox' #(ps3|ps4|xbox|pigpio_rc|nimbus|wiiu|F710|rc3|MM1|custom) custom will run the my_joystick.py controller written by the `donkey createjs` command -USE_NETWORKED_JS = False #should we listen for remote joystick control over the network? -NETWORK_JS_SERVER_IP = None #when listening for network joystick control, which ip is serving this information -JOYSTICK_DEADZONE = 0.01 # when non zero, this is the smallest throttle before recording triggered. -JOYSTICK_THROTTLE_DIR = -1.0 # use -1.0 to flip forward/backward, use 1.0 to use joystick's natural forward/backward -USE_FPV = False # send camera data to FPV webserver -JOYSTICK_DEVICE_FILE = "/dev/input/js0" # this is the unix file use to access the joystick. 
- -#For the categorical model, this limits the upper bound of the learned throttle -#it's very IMPORTANT that this value is matched from the training PC config.py and the robot.py -#and ideally wouldn't change once set. -MODEL_CATEGORICAL_MAX_THROTTLE_RANGE = 0.8 - -#RNN or 3D -SEQUENCE_LENGTH = 3 #some models use a number of images over time. This controls how many. +# "CANNY" Edge Detection Settings. +CANNY_LOW_THRESHOLD = 60 +CANNY_HIGH_THRESHOLD = 110 +CANNY_APERTURE = 3 -#IMU -HAVE_IMU = False #when true, this add a Mpu6050 part and records the data. Can be used with a -IMU_SENSOR = 'mpu6050' # (mpu6050|mpu9250) -IMU_ADDRESS = 0x68 # if AD0 pin is pulled high them address is 0x69, otherwise it is 0x68 -IMU_DLP_CONFIG = 0 # Digital Lowpass Filter setting (0:250Hz, 1:184Hz, 2:92Hz, 3:41Hz, 4:20Hz, 5:10Hz, 6:5Hz) +# "BLUR" Transformation Settings. +BLUR_KERNEL = 5 +BLUR_KERNEL_Y = None +BLUR_GAUSSIAN = True -#SOMBRERO -HAVE_SOMBRERO = False #set to true when using the sombrero hat from the Donkeycar store. This will enable pwm on the hat. +# "RESIZE" / "SCALE" Settings. +RESIZE_WIDTH = 160 +RESIZE_HEIGHT = 120 +SCALE_WIDTH = 1.0 +SCALE_HEIGHT = None -#PIGPIO RC control -STEERING_RC_GPIO = 26 -THROTTLE_RC_GPIO = 20 -DATA_WIPER_RC_GPIO = 19 -PIGPIO_STEERING_MID = 1500 # Adjust this value if your car cannot run in a straight line -PIGPIO_MAX_FORWARD = 2000 # Max throttle to go fowrward. The bigger the faster -PIGPIO_STOPPED_PWM = 1500 -PIGPIO_MAX_REVERSE = 1000 # Max throttle to go reverse. The smaller the faster -PIGPIO_SHOW_STEERING_VALUE = False -PIGPIO_INVERT = False -PIGPIO_JITTER = 0.025 # threshold below which no signal is reported +# ============================================================================== +# 3. MODES, FEATURES & OPERATION +# (Driving modes, Web/Joystick control, Simulation, Logging) +# ============================================================================== +# VEHICLE LOOP +# The main loop frequency (Hz). 
Hardware is updated this many times per second. +DRIVE_LOOP_HZ = 20 -#ROBOHAT MM1 -MM1_STEERING_MID = 1500 # Adjust this value if your car cannot run in a straight line -MM1_MAX_FORWARD = 2000 # Max throttle to go fowrward. The bigger the faster -MM1_STOPPED_PWM = 1500 -MM1_MAX_REVERSE = 1000 # Max throttle to go reverse. The smaller the faster -MM1_SHOW_STEERING_VALUE = False -# Serial port -# -- Default Pi: '/dev/ttyS0' -# -- Jetson Nano: '/dev/ttyTHS1' -# -- Google coral: '/dev/ttymxc0' -# -- Windows: 'COM3', Arduino: '/dev/ttyACM0' -# -- MacOS/Linux:please use 'ls /dev/tty.*' to find the correct serial port for mm1 -# eg.'/dev/tty.usbmodemXXXXXX' and replace the port accordingly -MM1_SERIAL_PORT = '/dev/ttyS0' # Serial Port for reading and sending MM1 data. - -#LOGGING -HAVE_CONSOLE_LOGGING = True -LOGGING_LEVEL = 'INFO' # (Python logging level) 'NOTSET' / 'DEBUG' / 'INFO' / 'WARNING' / 'ERROR' / 'FATAL' / 'CRITICAL' -LOGGING_FORMAT = '%(message)s' # (Python logging format - https://docs.python.org/3/library/logging.html#formatter-objects - -#TELEMETRY -HAVE_MQTT_TELEMETRY = False -TELEMETRY_DONKEY_NAME = 'my_robot1234' -TELEMETRY_MQTT_TOPIC_TEMPLATE = 'donkey/%s/telemetry' -TELEMETRY_MQTT_JSON_ENABLE = False -TELEMETRY_MQTT_BROKER_HOST = 'broker.hivemq.com' -TELEMETRY_MQTT_BROKER_PORT = 1883 -TELEMETRY_PUBLISH_PERIOD = 1 -TELEMETRY_LOGGING_ENABLE = True -TELEMETRY_LOGGING_LEVEL = 'INFO' # (Python logging level) 'NOTSET' / 'DEBUG' / 'INFO' / 'WARNING' / 'ERROR' / 'FATAL' / 'CRITICAL' -TELEMETRY_LOGGING_FORMAT = '%(message)s' # (Python logging format - https://docs.python.org/3/library/logging.html#formatter-objects -TELEMETRY_DEFAULT_INPUTS = 'pilot/angle,pilot/throttle,recording' -TELEMETRY_DEFAULT_TYPES = 'float,float' - -# PERF MONITOR -HAVE_PERFMON = False - -#RECORD OPTIONS -RECORD_DURING_AI = False #normally we do not record during ai mode. Set this to true to get image and steering records for your Ai. Be careful not to use them to train. 
-AUTO_CREATE_NEW_TUB = False #create a new tub (tub_YY_MM_DD) directory when recording or append records to data directory directly - -#LED -HAVE_RGB_LED = False #do you have an RGB LED like https://www.amazon.com/dp/B07BNRZWNF -LED_INVERT = False #COMMON ANODE? Some RGB LED use common anode. like https://www.amazon.com/Xia-Fly-Tri-Color-Emitting-Diffused/dp/B07MYJQP8B - -#LED board pin number for pwm outputs -#These are physical pinouts. See: https://www.raspberrypi-spy.co.uk/2012/06/simple-guide-to-the-rpi-gpio-header-and-pins/ -LED_PIN_R = 12 -LED_PIN_G = 10 -LED_PIN_B = 16 - -#LED status color, 0-100 -LED_R = 0 -LED_G = 0 -LED_B = 1 +# Max loops to run before quitting (useful for testing, None = infinite). +MAX_LOOPS = None -#LED Color for record count indicator -REC_COUNT_ALERT = 1000 #how many records before blinking alert -REC_COUNT_ALERT_CYC = 15 #how many cycles of 1/20 of a second to blink per REC_COUNT_ALERT records -REC_COUNT_ALERT_BLINK_RATE = 0.4 #how fast to blink the led in seconds on/off -#first number is record count, second tuple is color ( r, g, b) (0-100) -#when record count exceeds that number, the color will be used -RECORD_ALERT_COLOR_ARR = [ (0, (1, 1, 1)), - (3000, (5, 5, 5)), - (5000, (5, 2, 0)), - (10000, (0, 5, 0)), - (15000, (0, 5, 5)), - (20000, (0, 0, 5)), ] +# AUTOMATION & BEHAVIORS +# Show the image the pilot sees (with overlays) in the web UI. +SHOW_PILOT_IMAGE = False -#LED status color, 0-100, for model reloaded alert -MODEL_RELOADED_LED_R = 100 -MODEL_RELOADED_LED_G = 0 -MODEL_RELOADED_LED_B = 0 +# Scale all AI throttle output by this multiplier. +AI_THROTTLE_MULT = 1.0 +# "Launch Control": Boost throttle for X seconds at start of autonomous mode. 
+AI_LAUNCH_DURATION = 0.0 +AI_LAUNCH_THROTTLE = 0.0 +AI_LAUNCH_ENABLE_BUTTON = 'R2' +AI_LAUNCH_KEEP_ENABLED = False #BEHAVIORS #When training the Behavioral Neural Network model, make a list of the behaviors, #Set the TRAIN_BEHAVIORS = True, and use the BEHAVIOR_LED_COLORS to give each behavior a color TRAIN_BEHAVIORS = False BEHAVIOR_LIST = ['Left_Lane', "Right_Lane"] -BEHAVIOR_LED_COLORS = [(0, 10, 0), (10, 0, 0)] #RGB tuples 0-100 per chanel +BEHAVIOR_LED_COLORS = [(0, 10, 0), (10, 0, 0)] #Localizer #The localizer is a neural network that can learn to predict its location on the track. @@ -693,74 +723,101 @@ #to predict the segement of the course, where the course is divided into NUM_LOCATIONS segments. TRAIN_LOCALIZER = False NUM_LOCATIONS = 10 -BUTTON_PRESS_NEW_TUB = False #when enabled, makes it easier to divide our data into one tub per track length if we make a new tub on each X button press. +BUTTON_PRESS_NEW_TUB = False + + +# PATH FOLLOWING (GPS or Odometry based) +PATH_FILENAME = "donkey_path.pkl" +PATH_SCALE = 5.0 +PATH_OFFSET = (0, 0) +PATH_MIN_DIST = 0.3 +PID_P = -10.0 +PID_I = 0.000 +PID_D = -0.2 +PID_THROTTLE = 0.2 +USE_CONSTANT_THROTTLE = False +SAVE_PATH_BTN = "cross" +RESET_ORIGIN_BTN = "triangle" + +# STOP SIGN DETECTOR +STOP_SIGN_DETECTOR = False +STOP_SIGN_MIN_SCORE = 0.2 +STOP_SIGN_SHOW_BOUNDING_BOX = True +STOP_SIGN_MAX_REVERSE_COUNT = 10 +STOP_SIGN_REVERSE_THROTTLE = -0.5 + + +# RECORDING & LOGGING + +# Automatically record data when throttle is > 0 (Standard training data collection). +AUTO_RECORD_ON_THROTTLE = True + +# Record data even when the AI is driving (Careful: don't train on this data!). +RECORD_DURING_AI = False + +# Create a new directory for every session (True) or append to existing (False). +AUTO_CREATE_NEW_TUB = False + +# Console logging settings. 
+HAVE_CONSOLE_LOGGING = True +LOGGING_LEVEL = 'INFO' +LOGGING_FORMAT = '%(message)s' +HAVE_PERFMON = False +SHOW_FPS = False +FPS_DEBUG_INTERVAL = 10 + + +# TELEMETRY (MQTT) +HAVE_MQTT_TELEMETRY = False +TELEMETRY_DONKEY_NAME = 'my_robot1234' +TELEMETRY_MQTT_TOPIC_TEMPLATE = 'donkey/%s/telemetry' +TELEMETRY_MQTT_JSON_ENABLE = False +TELEMETRY_MQTT_BROKER_HOST = 'broker.hivemq.com' +TELEMETRY_MQTT_BROKER_PORT = 1883 +TELEMETRY_PUBLISH_PERIOD = 1 +TELEMETRY_LOGGING_ENABLE = True +TELEMETRY_LOGGING_LEVEL = 'INFO' +TELEMETRY_LOGGING_FORMAT = '%(message)s' +TELEMETRY_DEFAULT_INPUTS = 'pilot/angle,pilot/throttle,recording' +TELEMETRY_DEFAULT_TYPES = 'float,float' + -#DonkeyGym +# SIMULATION (DONKEY GYM) #Only on Ubuntu linux, you can use the simulator as a virtual donkey and #issue the same python manage.py drive command as usual, but have them control a virtual car. #This enables that, and sets the path to the simualator and the environment. #You will want to download the simulator binary from: https://github.com/tawnkramer/donkey_gym/releases/download/v18.9/DonkeySimLinux.zip #then extract that and modify DONKEY_SIM_PATH. + +# Settings for connecting to the Donkey Gym Unity simulator. DONKEY_GYM = False -DONKEY_SIM_PATH = "path to sim" #"/home/tkramer/projects/sdsandbox/sdsim/build/DonkeySimLinux/donkey_sim.x86_64" when racing on virtual-race-league use "remote", or user "remote" when you want to start the sim manually first. 
-DONKEY_GYM_ENV_NAME = "donkey-generated-track-v0" # ("donkey-generated-track-v0"|"donkey-generated-roads-v0"|"donkey-warehouse-v0"|"donkey-avc-sparkfun-v0") -GYM_CONF = { "body_style" : "donkey", "body_rgb" : (128, 128, 128), "car_name" : "car", "font_size" : 100} # body style(donkey|bare|car01) body rgb 0-255 +DONKEY_SIM_PATH = "path to sim" +DONKEY_GYM_ENV_NAME = "donkey-generated-track-v0" +GYM_CONF = { "body_style" : "donkey", "body_rgb" : (128, 128, 128), "car_name" : "car", "font_size" : 100} GYM_CONF["racer_name"] = "Your Name" GYM_CONF["country"] = "Place" GYM_CONF["bio"] = "I race robots." - -SIM_HOST = "127.0.0.1" # when racing on virtual-race-league use host "trainmydonkey.com" -SIM_ARTIFICIAL_LATENCY = 0 # this is the millisecond latency in controls. Can use useful in emulating the delay when useing a remote server. values of 100 to 400 probably reasonable. - -# Save info from Simulator (pln) +SIM_HOST = "127.0.0.1" +SIM_ARTIFICIAL_LATENCY = 0 SIM_RECORD_LOCATION = False -SIM_RECORD_GYROACCEL= False +SIM_RECORD_GYROACCEL = False SIM_RECORD_VELOCITY = False SIM_RECORD_LIDAR = False +PUB_CAMERA_IMAGES = False # Publish camera over network +USE_FPV = False # send camera data to FPV webserver -#publish camera over network -#This is used to create a tcp service to publish the camera feed -PUB_CAMERA_IMAGES = False - -#When racing, to give the ai a boost, configure these values. -AI_LAUNCH_DURATION = 0.0 # the ai will output throttle for this many seconds -AI_LAUNCH_THROTTLE = 0.0 # the ai will output this throttle value -AI_LAUNCH_ENABLE_BUTTON = 'R2' # this keypress will enable this boost. It must be enabled before each use to prevent accidental trigger. -AI_LAUNCH_KEEP_ENABLED = False # when False ( default) you will need to hit the AI_LAUNCH_ENABLE_BUTTON for each use. This is safest. When this True, is active on each trip into "local" ai mode. - -#Scale the output of the throttle of the ai pilot for all model types. 
-AI_THROTTLE_MULT = 1.0 # this multiplier will scale every throttle value for all output from NN models - -#Path following -PATH_FILENAME = "donkey_path.pkl" # the path will be saved to this filename -PATH_SCALE = 5.0 # the path display will be scaled by this factor in the web page -PATH_OFFSET = (0, 0) # 255, 255 is the center of the map. This offset controls where the origin is displayed. -PATH_MIN_DIST = 0.3 # after travelling this distance (m), save a path point -PID_P = -10.0 # proportional mult for PID path follower -PID_I = 0.000 # integral mult for PID path follower -PID_D = -0.2 # differential mult for PID path follower -PID_THROTTLE = 0.2 # constant throttle value during path following -USE_CONSTANT_THROTTLE = False # whether or not to use the constant throttle or variable throttle captured during path recording -SAVE_PATH_BTN = "cross" # joystick button to save path -RESET_ORIGIN_BTN = "triangle" # joystick button to press to move car back to origin - -# Intel Realsense D435 and D435i depth sensing camera -REALSENSE_D435_RGB = True # True to capture RGB image -REALSENSE_D435_DEPTH = True # True to capture depth as image array -REALSENSE_D435_IMU = False # True to capture IMU data (D435i only) -REALSENSE_D435_ID = None # serial number of camera or None if you only have one camera (it will autodetect) - -# Stop Sign Detector -STOP_SIGN_DETECTOR = False -STOP_SIGN_MIN_SCORE = 0.2 -STOP_SIGN_SHOW_BOUNDING_BOX = True -STOP_SIGN_MAX_REVERSE_COUNT = 10 # How many times should the car reverse when detected a stop sign, set to 0 to disable reversing -STOP_SIGN_REVERSE_THROTTLE = -0.5 # Throttle during reversing when detected a stop sign -# FPS counter -SHOW_FPS = False -FPS_DEBUG_INTERVAL = 10 # the interval in seconds for printing the frequency info into the shell - -# PI connection +# PI CONNECTION PI_USERNAME = "pi" PI_HOSTNAME = "donkeypi.local" + + +# WEB CONTROL +# The port for the web server (default 8887). 
+WEB_CONTROL_PORT = int(os.getenv("WEB_CONTROL_PORT", 8887)) + +# Initial mode on startup. +# 'user': Human control +# 'local_angle': AI Steering, Human Throttle +# 'local': AI Steering and Throttle +WEB_INIT_MODE = "user" diff --git a/donkeycar/templates/cfg_path_follow.py b/donkeycar/templates/cfg_path_follow.py index 4ac8a7895..5d5f2770a 100644 --- a/donkeycar/templates/cfg_path_follow.py +++ b/donkeycar/templates/cfg_path_follow.py @@ -4,7 +4,7 @@ # This file is read by your car application's manage.py script to change the car # performance -# If desired, all config overrides can be specified here. +# If desired, all config overrides can be specified here. # The update operation will not touch this file. # """ @@ -641,9 +641,15 @@ PATH_SCALE = 10.0 # the path display will be scaled by this factor in the web page PATH_OFFSET = (255, 255) # 255, 255 is the center of the map. This offset controls where the origin is displayed. PATH_MIN_DIST = 0.2 # after travelling this distance (m), save a path point -PATH_SEARCH_LENGTH = None # number of points to search for closest point, None to search entire path +PATH_SEARCH_LENGTH = 10 # number of points to search for closest point, None to search entire path + # ideally this is set to a number that is large enough to find the closest point + # but not so large that it finds a point on a different part of the track. + # For instance, if you are racing on a track with long straightaways and tight turns, + # you may want to set this to a number that is large enough to find the closest point + # on the straightaway but not so large that it finds a point on the straightaway + # when you are in the middle of the turn. 
PATH_LOOK_AHEAD = 1 # number of points ahead of the closest point to include in cte track -PATH_LOOK_BEHIND = 1 # number of points behind the closest point to include in cte track +PATH_LOOK_BEHIND = 1 # number of points behind the closest point to include in cte track PID_P = -0.5 # proportional mult for PID path follower PID_I = 0.000 # integral mult for PID path follower PID_D = -0.3 # differential mult for PID path follower @@ -671,4 +677,3 @@ # Intel Realsense T265 tracking camera REALSENSE_T265_ID = None # serial number of camera or None if you only have one camera (it will autodetect) WHEEL_ODOM_CALIB = "calibration_odometry.json" - diff --git a/donkeycar/templates/cfg_simulator.py b/donkeycar/templates/cfg_simulator.py index ea498fc5d..50da81132 100644 --- a/donkeycar/templates/cfg_simulator.py +++ b/donkeycar/templates/cfg_simulator.py @@ -274,6 +274,10 @@ REALSENSE_D435_IMU = False # True to capture IMU data (D435i only) REALSENSE_D435_ID = None # serial number of camera or None if you only have one camera (it will autodetect) +OAKD_RGB = True # True to capture RGB image +OAKD_DEPTH = True # True to capture depth as image array +OAKD_ID = None # serial number of camera or None if you only have one camera (it will autodetect) + # Stop Sign Detector STOP_SIGN_DETECTOR = False STOP_SIGN_MIN_SCORE = 0.2 diff --git a/donkeycar/templates/complete.py b/donkeycar/templates/complete.py index 32c4a7168..0b7f0a295 100644 --- a/donkeycar/templates/complete.py +++ b/donkeycar/templates/complete.py @@ -499,6 +499,10 @@ def run(self, *components): inputs += ['cam/depth_array'] types += ['gray16_array'] + if cfg.CAMERA_TYPE == "OAKD" and cfg.OAKD_DEPTH: + inputs += ['cam/depth_array'] + types += ['gray16_array'] + if cfg.HAVE_IMU or (cfg.CAMERA_TYPE == "D435" and cfg.REALSENSE_D435_IMU): inputs += ['imu/acl_x', 'imu/acl_y', 'imu/acl_z', 'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'] @@ -873,6 +877,17 @@ def add_camera(V, cfg, camera_type): 'imu/acl_x', 'imu/acl_y', 'imu/acl_z', 
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'], threaded=True) + + elif cfg.CAMERA_TYPE == "OAKD": + from donkeycar.parts.oak_d import OakD + cam = OakD( + enable_rgb=cfg.OAKD_RGB, + enable_depth=cfg.OAKD_DEPTH, + device_id=cfg.OAKD_ID) + V.add(cam, inputs=[], + outputs=['cam/image_array', 'cam/depth_array'], + threaded=True) + else: inputs = [] outputs = ['cam/image_array'] @@ -1094,6 +1109,7 @@ def add_drivetrain(V, cfg): elif cfg.DRIVE_TRAIN_TYPE == "MM1": from donkeycar.parts.robohat import RoboHATDriver + # Share serial port with controller to avoid opening the same port twice V.add(RoboHATDriver(cfg), inputs=['steering', 'throttle']) elif cfg.DRIVE_TRAIN_TYPE == "PIGPIO_PWM": diff --git a/donkeycar/tests/pytest.ini b/donkeycar/tests/pytest.ini index 078a34f6c..221a9f709 100644 --- a/donkeycar/tests/pytest.ini +++ b/donkeycar/tests/pytest.ini @@ -5,3 +5,4 @@ filterwarnings = log_cli = True log_cli_level = INFO +reruns = 3 diff --git a/donkeycar/tests/test_telemetry.py b/donkeycar/tests/test_telemetry.py index 02a8f6907..a2427e2da 100644 --- a/donkeycar/tests/test_telemetry.py +++ b/donkeycar/tests/test_telemetry.py @@ -2,46 +2,98 @@ # -*- coding: utf-8 -*- import time from unittest import mock -from paho.mqtt.client import Client -from paho.mqtt.enums import CallbackAPIVersion - +from unittest.mock import patch, MagicMock import donkeycar.templates.cfg_complete as cfg from donkeycar.parts.telemetry import MqttTelemetry +import pytest from random import randint -def test_mqtt_telemetry(): - +@patch('donkeycar.parts.telemetry.MQTTClient') +def test_mqtt_telemetry(mock_mqtt_client): + """Test MQTT telemetry functionality with mocked MQTT client""" + + # Setup configuration cfg.TELEMETRY_DEFAULT_INPUTS = 'pilot/angle,pilot/throttle' cfg.TELEMETRY_DONKEY_NAME = 'test{}'.format(randint(0, 1000)) cfg.TELEMETRY_MQTT_JSON_ENABLE = True - # Create receiver - sub = Client(callback_api_version=CallbackAPIVersion.VERSION2, - clean_session=True) - - on_message_mock = mock.Mock() - 
sub.on_message = on_message_mock - sub.connect(cfg.TELEMETRY_MQTT_BROKER_HOST) - sub.loop_start() - name = "donkey/%s/#" % cfg.TELEMETRY_DONKEY_NAME - sub.subscribe(name) + # Create mock MQTT client + mock_client_instance = MagicMock() + mock_mqtt_client.return_value = mock_client_instance + # Create telemetry instance t = MqttTelemetry(cfg) + + # Verify MQTT client was initialized correctly + mock_mqtt_client.assert_called_once_with(callback_api_version=mock.ANY) + mock_client_instance.connect.assert_called_once_with( + cfg.TELEMETRY_MQTT_BROKER_HOST, + cfg.TELEMETRY_MQTT_BROKER_PORT + ) + mock_client_instance.loop_start.assert_called_once() + + # Test adding step inputs t.add_step_inputs(inputs=['my/voltage'], types=['float']) + expected_inputs = ['pilot/angle', 'pilot/throttle', 'my/voltage'] + expected_types = ['float', 'float', 'float'] + assert t._step_inputs == expected_inputs + assert t._step_types == expected_types + + # Test initial publish (should do nothing as queue is empty) t.publish() + mock_client_instance.publish.assert_not_called() + # Test reporting data timestamp = t.report({'my/speed': 16, 'my/voltage': 12}) + assert isinstance(timestamp, int) + assert t.qsize == 1 + + # Test run method (adds step inputs to queue) t.run(33.3, 22.2, 11.1) assert t.qsize == 2 - time.sleep(1.5) - + # Test publishing with data t.publish() assert t.qsize == 0 + + # Verify publish was called + assert mock_client_instance.publish.called + call_args = mock_client_instance.publish.call_args + topic, payload = call_args[0] + + # Verify topic format + expected_topic = cfg.TELEMETRY_MQTT_TOPIC_TEMPLATE % cfg.TELEMETRY_DONKEY_NAME + assert topic == expected_topic + + # Verify JSON payload structure + import json + payload_data = json.loads(payload) + assert isinstance(payload_data, list) + assert len(payload_data) >= 1 + + # Check that data contains expected keys + data_entry = payload_data[0] + assert 'ts' in data_entry + assert 'values' in data_entry + assert 'my/speed' 
in data_entry['values'] + assert 'pilot/angle' in data_entry['values'] + assert 'pilot/throttle' in data_entry['values'] + + +def test_mqtt_telemetry_connection_error(): + """Test MQTT telemetry handles connection errors gracefully""" + + cfg.TELEMETRY_DEFAULT_INPUTS = 'pilot/angle,pilot/throttle' + cfg.TELEMETRY_DONKEY_NAME = 'test{}'.format(randint(0, 1000)) + cfg.TELEMETRY_MQTT_JSON_ENABLE = True - time.sleep(0.5) + with patch('donkeycar.parts.telemetry.MQTTClient') as mock_mqtt_client: + # Simulate connection failure + mock_client_instance = MagicMock() + mock_client_instance.connect.side_effect = ConnectionError("Connection failed") + mock_mqtt_client.return_value = mock_client_instance - res = str.encode('[{"ts": %s, "values": {"my/speed": 16, "my/voltage": 11.1, "pilot/angle": 33.3, ' - '"pilot/throttle": 22.2}}]' % timestamp) - assert on_message_mock.call_args_list[0][0][2].payload == res + # Connection error should be raised during initialization + with pytest.raises(ConnectionError): + t = MqttTelemetry(cfg) diff --git a/donkeycar/tests/test_torch.py b/donkeycar/tests/test_torch.py index f6bdd2193..df97be203 100644 --- a/donkeycar/tests/test_torch.py +++ b/donkeycar/tests/test_torch.py @@ -1,3 +1,4 @@ +import importlib.util import pytest import tarfile import os @@ -7,6 +8,7 @@ Data = namedtuple('Data', ['type', 'name', 'convergence', 'pretrained']) +torch_available = importlib.util.find_spec('torch') is not None is_jetson = pytest.mark.skipif( platform.machine() == 'aarch64', @@ -49,8 +51,8 @@ def car_dir(tmpdir_factory): @is_jetson -@pytest.mark.skipif("GITHUB_ACTIONS" in os.environ, - reason='Suppress training test in CI') +@pytest.mark.skipif(not torch_available, + reason='torch not installed') @pytest.mark.parametrize('data', test_data) def test_train(config: Config, car_dir: str, data: Data) -> None: """ @@ -76,8 +78,8 @@ def pilot_path(name): @is_jetson -@pytest.mark.skipif("GITHUB_ACTIONS" in os.environ, - reason='Suppress training test in CI') 
+@pytest.mark.skipif(not torch_available, + reason='torch not installed') @pytest.mark.parametrize('model_type', ['resnet18']) def test_training_pipeline(config: Config, model_type: str, car_dir: str) \ -> None: diff --git a/donkeycar/tests/test_train.py b/donkeycar/tests/test_train.py index 6f08828fb..849b8a102 100644 --- a/donkeycar/tests/test_train.py +++ b/donkeycar/tests/test_train.py @@ -106,6 +106,8 @@ def car_dir(tmpdir_factory, base_config, imu_fields) -> str: record['localizer/location'] = 3 * count // len(tub) tub_full.write_record(record) count += 1 + tub_full.close() + tub.close() return car_dir @@ -140,6 +142,9 @@ def test_train(config: Config, data: Data) -> None: :param data: test case data :return: None """ + if data.type in ('fastai_linear',): + pytest.importorskip('torch', reason='torch not installed') + def pilot_path(name): pilot_name = f'pilot_{name}.savedmodel' return os.path.join(config.MODELS_PATH, pilot_name) diff --git a/donkeycar/tests/test_web_socket.py b/donkeycar/tests/test_web_socket.py index 4bb0c45b8..4270998d8 100644 --- a/donkeycar/tests/test_web_socket.py +++ b/donkeycar/tests/test_web_socket.py @@ -1,13 +1,10 @@ from tornado import testing import tornado.websocket import tornado.web -import tornado.ioloop import json from unittest.mock import Mock from donkeycar.parts.web_controller.web import WebSocketCalibrateAPI -from time import sleep - -SLEEP = 0.5 +import tornado.gen class WebSocketCalibrateTest(testing.AsyncHTTPTestCase): @@ -24,6 +21,17 @@ def get_app(self): def get_ws_url(self): return "ws://localhost:" + str(self.get_http_port()) + "/" + + async def wait_for_attribute_value(self, obj, attr_name, + expected_value, timeout_seconds=5): + """Poll until an object's attribute equals the expected value or timeout.""" + iterations = int(timeout_seconds / 0.1) + for _ in range(iterations): + if (hasattr(obj, attr_name) + and getattr(obj, attr_name) == expected_value): + return True + await tornado.gen.sleep(0.1) + return False 
@tornado.testing.gen_test def test_calibrate_servo_esc_1b(self): @@ -31,6 +39,8 @@ def test_calibrate_servo_esc_1b(self): # Now we can run a test on the WebSocket. mock = Mock() + mock.left_pulse = None + mock.right_pulse = None self.app.drive_train = dict() self.app.drive_train['steering'] = mock self.app.drive_train_type = "I2C_SERVO" @@ -38,9 +48,12 @@ def test_calibrate_servo_esc_1b(self): data = {"config": {"STEERING_LEFT_PWM": 444}} yield ws_client.write_message(json.dumps(data)) yield ws_client.close() - sleep(SLEEP) + + result = yield self.wait_for_attribute_value( + self.app.drive_train['steering'], 'left_pulse', 444) + assert result, "WebSocket message not processed within timeout" assert self.app.drive_train['steering'].left_pulse == 444 - assert isinstance(self.app.drive_train['steering'].right_pulse, Mock) + assert self.app.drive_train['steering'].right_pulse is None @tornado.testing.gen_test def test_calibrate_servo_esc_1a(self): @@ -48,6 +61,8 @@ def test_calibrate_servo_esc_1a(self): # Now we can run a test on the WebSocket. mock = Mock() + mock.left_pulse = None + mock.right_pulse = None self.app.drive_train = dict() self.app.drive_train['steering'] = mock self.app.drive_train_type = "PWM_STEERING_THROTTLE" @@ -55,9 +70,12 @@ def test_calibrate_servo_esc_1a(self): data = {"config": {"STEERING_LEFT_PWM": 444}} yield ws_client.write_message(json.dumps(data)) yield ws_client.close() - sleep(SLEEP) + + result = yield self.wait_for_attribute_value( + self.app.drive_train['steering'], 'left_pulse', 444) + assert result, "WebSocket message not processed within timeout" assert self.app.drive_train['steering'].left_pulse == 444 - assert isinstance(self.app.drive_train['steering'].right_pulse, Mock) + assert self.app.drive_train['steering'].right_pulse is None @tornado.testing.gen_test def test_calibrate_servo_esc_2b(self): @@ -65,6 +83,8 @@ def test_calibrate_servo_esc_2b(self): # Now we can run a test on the WebSocket. 
mock = Mock() + mock.left_pulse = None + mock.right_pulse = None self.app.drive_train = dict() self.app.drive_train['steering'] = mock self.app.drive_train_type = "I2C_SERVO" @@ -72,9 +92,12 @@ def test_calibrate_servo_esc_2b(self): data = {"config": {"STEERING_RIGHT_PWM": 555}} yield ws_client.write_message(json.dumps(data)) yield ws_client.close() - sleep(SLEEP) + + result = yield self.wait_for_attribute_value( + self.app.drive_train['steering'], 'right_pulse', 555) + assert result, "WebSocket message not processed within timeout" assert self.app.drive_train['steering'].right_pulse == 555 - assert isinstance(self.app.drive_train['steering'].left_pulse, Mock) + assert self.app.drive_train['steering'].left_pulse is None @tornado.testing.gen_test def test_calibrate_servo_esc_2a(self): @@ -82,6 +105,8 @@ def test_calibrate_servo_esc_2a(self): # Now we can run a test on the WebSocket. mock = Mock() + mock.left_pulse = None + mock.right_pulse = None self.app.drive_train = dict() self.app.drive_train['steering'] = mock self.app.drive_train_type = "PWM_STEERING_THROTTLE" @@ -89,9 +114,12 @@ def test_calibrate_servo_esc_2a(self): data = {"config": {"STEERING_RIGHT_PWM": 555}} yield ws_client.write_message(json.dumps(data)) yield ws_client.close() - sleep(SLEEP) + + result = yield self.wait_for_attribute_value( + self.app.drive_train['steering'], 'right_pulse', 555) + assert result, "WebSocket message not processed within timeout" assert self.app.drive_train['steering'].right_pulse == 555 - assert isinstance(self.app.drive_train['steering'].left_pulse, Mock) + assert self.app.drive_train['steering'].left_pulse is None @tornado.testing.gen_test def test_calibrate_servo_esc_3b(self): @@ -99,6 +127,8 @@ def test_calibrate_servo_esc_3b(self): # Now we can run a test on the WebSocket. 
mock = Mock() + mock.max_pulse = None + mock.min_pulse = None self.app.drive_train = dict() self.app.drive_train['throttle'] = mock self.app.drive_train_type = "I2C_SERVO" @@ -106,9 +136,12 @@ def test_calibrate_servo_esc_3b(self): data = {"config": {"THROTTLE_FORWARD_PWM": 666}} yield ws_client.write_message(json.dumps(data)) yield ws_client.close() - sleep(SLEEP) + + result = yield self.wait_for_attribute_value( + self.app.drive_train['throttle'], 'max_pulse', 666) + assert result, "WebSocket message not processed within timeout" assert self.app.drive_train['throttle'].max_pulse == 666 - assert isinstance(self.app.drive_train['throttle'].min_pulse, Mock) + assert self.app.drive_train['throttle'].min_pulse is None @tornado.testing.gen_test def test_calibrate_servo_esc_3a(self): @@ -116,6 +149,8 @@ def test_calibrate_servo_esc_3a(self): # Now we can run a test on the WebSocket. mock = Mock() + mock.max_pulse = None + mock.min_pulse = None self.app.drive_train = dict() self.app.drive_train['throttle'] = mock self.app.drive_train_type = "PWM_STEERING_THROTTLE" @@ -123,9 +158,12 @@ def test_calibrate_servo_esc_3a(self): data = {"config": {"THROTTLE_FORWARD_PWM": 666}} yield ws_client.write_message(json.dumps(data)) yield ws_client.close() - sleep(SLEEP) + + result = yield self.wait_for_attribute_value( + self.app.drive_train['throttle'], 'max_pulse', 666) + assert result, "WebSocket message not processed within timeout" assert self.app.drive_train['throttle'].max_pulse == 666 - assert isinstance(self.app.drive_train['throttle'].min_pulse, Mock) + assert self.app.drive_train['throttle'].min_pulse is None @tornado.testing.gen_test def test_calibrate_mm1(self): @@ -133,10 +171,14 @@ def test_calibrate_mm1(self): # Now we can run a test on the WebSocket. 
mock = Mock() + mock.STEERING_MID = None self.app.drive_train = mock self.app.drive_train_type = "MM1" data = {"config": {"MM1_STEERING_MID": 1234}} yield ws_client.write_message(json.dumps(data)) yield ws_client.close() - sleep(SLEEP) + + result = yield self.wait_for_attribute_value( + self.app.drive_train, 'STEERING_MID', 1234) + assert result, "WebSocket message not processed within timeout" assert self.app.drive_train.STEERING_MID == 1234 diff --git a/donkeycar/utilities/TrackSpeedPlanner/README.md b/donkeycar/utilities/TrackSpeedPlanner/README.md new file mode 100644 index 000000000..affbcc461 --- /dev/null +++ b/donkeycar/utilities/TrackSpeedPlanner/README.md @@ -0,0 +1,216 @@ +# Donkey Car Path Data Visualizer & Editor + +A web-based tool for visualizing and editing Donkey Car path data CSV files. This application provides an intuitive interface to load, visualize, and modify speed values for autonomous vehicle path planning. + +## Features + +### šÆ **Interactive Path Visualization** +- Canvas-based path display with automatic scaling and centering +- Color-coded speed visualization (red = slow, green = fast) +- Click-to-select path points with visual feedback +- Real-time updates as you modify speed values + +### š **Flexible File Operations** +- **Pi Directory Access**: Browse and load CSV files directly from the server directory +- **Upload from Browser**: Drag-and-drop or click to upload CSV files from your computer +- **Dual Save Options**: + - Save back to Pi directory (for server files) + - Download to local machine (for uploaded files) + +### ā” **Speed Editing** +- Individual speed controls for each path point +- Speed range: 0.1 to 1.0 in 0.1 increments +- Auto-scroll to selected point's controls +- Instant visual feedback on canvas + +### š± **Responsive Design** +- Works on desktop, tablet, and mobile devices +- Adaptive layout that adjusts to screen size +- Touch-friendly controls for mobile editing + +## Quick Start + +### 1. 
Start the Server +```bash +# Default port (note: avoid port 5000 on macOS due to AirPlay conflict) +python trackeditor.py --port=8080 + +# With debug mode +python trackeditor.py --port=8080 --debug +``` + +### 2. Access the Application +Open your browser and navigate to: +- **Local**: http://localhost:8080 +- **Network**: http://[your-ip]:8080 + +### 3. Load Path Data +Choose one of these methods: +- **From Pi Directory**: Use the dropdown to select existing CSV files +- **Upload File**: Drag & drop or click the upload area + +### 4. Edit and Save +- Click on path points to select them +- Adjust speed values using the right panel controls +- Save your changes back to the Pi or download locally + +## Server Configuration + +### Command Line Options +```bash +python trackeditor.py --help +``` + +Available options: +- `--port=PORT`: Server port (default: 5000, recommend 8080 on macOS) +- `--debug`: Enable debug mode with detailed logging + +### API Endpoints +- `GET /`: Main application interface +- `GET /api/health`: Server health check +- `GET /api/files`: List CSV files in server directory +- `POST /api/upload`: Upload CSV file from browser +- `POST /api/loadfile`: Load CSV file from server directory +- `POST /api/save`: Save modifications back to server +- `POST /api/export`: Download modified CSV file +- `POST /api/shutdown`: Graceful server shutdown + +## CSV File Format + +The application supports CSV files with the following formats: + +### With Headers (Recommended) +```csv +x,y,speed +10.5,20.3,0.8 +11.2,21.0,0.7 +12.1,22.5,0.9 +``` + +### Alternative Headers +Also supports: `pos_x,pos_y,throttle` and `X,Y,Speed` + +### Without Headers +```csv +10.5,20.3,0.8 +11.2,21.0,0.7 +12.1,22.5,0.9 +``` + +**Notes:** +- Speed values are automatically clamped to 0.1-1.0 range +- Missing speed values default to 0.5 +- Invalid rows are skipped with console warnings + +## File Structure + +``` +TrackSpeedPlanner/ +├── trackeditor.py # Main Tornado web server +├── static/ 
+│   └── index.html # Complete web interface (HTML/CSS/JS) +├── test_path.csv # Sample CSV file +└── attached_assets/ # Additional CSV files +``` + +## Requirements + +- **Python 3.6+** +- **Tornado web framework** +You should run this from your donkey environment, which has Python 3.11 with +Tornado installed. The installation below is only relevant when running from +another environment; see the next section for that. + +### Installation +```bash +# Install Tornado if not available +pip install tornado + +# Or with conda +conda install tornado +``` + +## Technical Details + +### Visualization Engine +- **Canvas Size**: 800x600 pixels (responsive) +- **Auto-scaling**: Automatically fits path data to canvas +- **Color Coding**: RGB interpolation based on speed values +- **Selection Radius**: 15-pixel click detection around points + +### Performance +- **Raspberry Pi Zero/1**: Suitable for small to medium paths +- **Raspberry Pi 2/3**: Good performance for typical donkey car paths +- **Raspberry Pi 4+**: Excellent performance with large datasets +- **Desktop/Laptop**: Handles very large path files efficiently + +### Browser Compatibility +- Modern browsers with HTML5 Canvas support +- Chrome, Firefox, Safari, Edge +- Mobile browsers on iOS and Android + +## Troubleshooting + +### Server Won't Start +```bash +# Check Python version +python --version # Should be 3.6+ + +# Install Tornado +pip install tornado + +# Check port availability (especially on macOS) +lsof -i :5000 # If in use, try different port +python trackeditor.py --port=8080 +``` + +### Can't Access from Network +```bash +# Check your IP address +hostname -I # Linux/Pi +ipconfig # Windows +ifconfig # macOS + +# Test locally first +curl http://localhost:8080/api/health +``` + +### File Upload Issues +- Ensure files have `.csv` extension +- Check file format matches expected CSV structure +- Large files may take time to process on slower hardware + +### Port 5000 Conflicts (macOS) +macOS uses port 5000 for AirPlay. 
Use an alternative port: +```bash +python trackeditor.py --port=8080 +``` + +## Integration with Donkey Car + +This tool is designed to work with Donkey Car path planning: + +1. **Record Paths**: Use Donkey Car to record driving paths +2. **Edit Speeds**: Load and modify speed profiles using this tool +3. **Deploy**: Save edited paths back to your Donkey Car for autonomous driving + +## Development + +### Server Development +- Edit `trackeditor.py` for backend changes +- Supports hot reload in debug mode: `--debug` + +### Frontend Development +- Edit `static/index.html` for UI changes +- Contains all HTML, CSS, and JavaScript in a single file +- Refresh browser to see changes + +### Adding Features +The application uses a RESTful API design. To add new functionality: +1. Create new handler class in `trackeditor.py` +2. Add route to `make_app()` function +3. Implement frontend calls in `static/index.html` + +## License + +This tool is part of the Donkey Car project and follows the same open-source principles. 
\ No newline at end of file diff --git a/donkeycar/utilities/TrackSpeedPlanner/attached_assets/donkey_path (1)_1749922476174.csv b/donkeycar/utilities/TrackSpeedPlanner/attached_assets/donkey_path (1)_1749922476174.csv new file mode 100644 index 000000000..cbaed4ccf --- /dev/null +++ b/donkeycar/utilities/TrackSpeedPlanner/attached_assets/donkey_path (1)_1749922476174.csv @@ -0,0 +1,296 @@ +-0.0030673221917822957, 0.0018730079755187035, 0.10 +-0.007616000366397202, 0.21041828347370028, 0.21 +-0.011573938943911344, 0.4121252675540745, 0.19 +-0.01824282720917836, 0.6764627313241363, 0.21 +-0.03438853187253699, 1.1043250309303403, 0.13 +-0.05552761268336326, 1.365461734123528, 0.18 +-0.07756315887672827, 1.6166332405991852, 0.22 +-0.09831937844865024, 1.8845999259501696, 0.19 +-0.11706995643908158, 2.178405029233545, 0.2 +-0.1370603431132622, 2.4790539648383856, 0.16 +-0.14352418953785673, 2.7864206824451685, 0.2 +-0.1350443068658933, 3.2665225761011243, 0.2 +-0.13057457859395072, 3.9041928029619157, 0.17 +-0.2759307601954788, 4.08069723425433, 0.18 +-0.26287036488065496, 4.413014094810933, 0.19 +-0.3238702082890086, 4.738206502050161, 0.18 +-0.317751364025753, 5.017763447016478, 0.14 +-0.3245541320065968, 5.2854263400658965, 0.16 +-0.31516807473963127, 5.529496371280402, 0.23 +-0.3057356822537258, 5.893055530264974, 0.21 +-0.32409560115775093, 6.175591673702002, 0.21 +-0.20024774060584605, 6.445094198919833, 0.22 +-0.20232785487314686, 6.923808777704835, 0.2 +-0.21382020541932434, 7.269447682891041, 0.19 +-0.21788450243184343, 7.610775483772159, 0.21 +-0.2115321487071924, 7.955708012916148, 0.21 +-0.20915608439827338, 8.310093288309872, 0.19 +-0.18674702144926414, 8.672987627796829, 0.21 +-0.18244739610236138, 9.02458624728024, 0.22 +-0.17332758504198864, 9.560641455464065, 0.2 +-0.17610309325391427, 9.919872448313981, 0.2 +-0.17738666926743463, 10.28278435766697, 0.21 +-0.17360054177697748, 10.650270047597587, 0.2 +-0.1684942006249912, 11.020330023020506, 0.2 
+-0.16525320091750473, 11.380987118463963, 0.2 +-0.151132392056752, 11.74247475201264, 0.19 +-0.14113290887326002, 12.096425587311387, 0.19 +-0.1391996091697365, 12.436963462736458, 0.2 +-0.12483004067325965, 12.770561928395182, 0.19 +-0.10620368458330631, 13.108556433580816, 0.19 +-0.09411374182673171, 13.441250945907086, 0.2 +-0.08369441644754261, 13.939435167703778, 0.2 +-0.07446853868896142, 14.278802696149796, 0.2 +-0.05699604615801945, 14.785791465081275, 0.19 +-0.04335269337752834, 15.12936898181215, 0.19 +-0.03141556039918214, 15.463172893971205, 0.19 +-0.01712639193283394, 15.787353243678808, 0.2 +-0.0009073510300368071, 16.109485612250865, 0.21 +0.016964310547336936, 16.435112840030342, 0.22 +0.04258392611518502, 16.774894473608583, 0.21 +0.057979221222922206, 17.133600993081927, 0.17 +0.07151232560863718, 17.483273970428854, 0.16 +0.08968014811398461, 17.805574386846274, 0.15 +0.0993453242117539, 18.101353115867823, 0.14 +0.09989347273949534, 18.374678417574614, 0.16 +0.09441870549926534, 18.626816655974835, 0.16 +0.0819710700889118, 18.860546175390482, 0.16 +0.06464427232276648, 19.07400227803737, 0.15 +0.04883137933211401, 19.274702372029424, 0.17 +0.00900184892816469, 19.57053424604237, 0.18 +-0.056796766992192715, 19.86584907397628, 0.17 +-0.18286227021599188, 20.249033151660115, 0.18 +-0.26627612952142954, 20.438860008493066, 0.22 +-0.3643774958909489, 20.638046366162598, 0.16 +-0.5019444086938165, 20.852528900373727, 0.18 +-0.6410112387384288, 21.04338488727808, 0.18 +-0.7983938870602287, 21.217775813303888, 0.18 +-0.9639892141567543, 21.380601855460554, 0.17 +-1.1309032734134234, 21.52201599860564, 0.17 +-1.3028351159300655, 21.6459281463176, 0.16 +-1.637306509364862, 21.83438921859488, 0.17 +-1.88962384784827, 21.93257800769061, 0.16 +-2.1394269879092462, 22.002119564451277, 0.19 +-2.5065589963342063, 22.0525323394686, 0.19 +-2.7168480378459208, 22.05543560255319, 0.2 +-2.937315894290805, 22.044574580155313, 0.19 +-3.1725628016283736, 
22.0133399781771, 0.2 +-3.4235481740906835, 21.96340212924406, 0.19 +-3.826888970565051, 21.82131560239941, 0.2 +-4.0977833410725, 21.68234611535445, 0.19 +-4.365809155919123, 21.499213048722595, 0.19 +-4.610208795464132, 21.270261735655367, 0.19 +-4.916357762471307, 20.860479433089495, 0.17 +-5.079315411276184, 20.55289688752964, 0.17 +-5.207706963701639, 20.245388622395694, 0.18 +-5.317029797064606, 19.928483449853957, 0.18 +-5.396341048006434, 19.61039865994826, 0.19 +-5.448629320191685, 19.283218421507627, 0.18 +-5.479991402826272, 18.955120807979256, 0.19 +-5.491415091499221, 18.62445211224258, 0.19 +-5.484423849266022, 18.28383804159239, 0.19 +-5.482304791279603, 17.771695592440665, 0.2 +-5.484244559134822, 17.43041903525591, 0.19 +-5.50052320380928, 16.911231032572687, 0.19 +-5.514527516148519, 16.563408902846277, 0.18 +-5.535436813312117, 16.22414111159742, 0.2 +-5.561508715094533, 15.888426358345896, 0.18 +-5.587731849460397, 15.554005674552172, 0.19 +-5.619251478870865, 15.226463454309851, 0.18 +-5.656113327422645, 14.900444301310927, 0.17 +-5.6966679152683355, 14.579073733650148, 0.18 +-5.733083840052132, 14.267271320335567, 0.19 +-5.773057037906256, 13.956976734567434, 0.19 +-5.81742249650415, 13.645426867995411, 0.18 +-5.864255073538516, 13.330019732937217, 0.18 +-5.91576976532815, 13.017422806005925, 0.18 +-5.96446127386298, 12.712373756803572, 0.16 +-6.0223973040119745, 12.407957674004138, 0.17 +-6.069540124794003, 12.113237618934363, 0.17 +-6.11194250889821, 11.822540109977126, 0.17 +-6.148421284044161, 11.54139551660046, 0.15 +-6.179633405234199, 11.268886036705226, 0.14 +-6.189783743757289, 11.011340711265802, 0.15 +-6.194082877191249, 10.773875819519162, 0.19 +-6.191221747023519, 10.538750671315938, 0.17 +-6.189203486777842, 10.299939072225243, 0.18 +-6.180239249079023, 10.057374525815248, 0.17 +-6.170320324134082, 9.812585640698671, 0.19 +-6.158895497967023, 9.53509511006996, 0.18 +-6.146881507534999, 9.250581619795412, 0.17 +-6.131272407073993, 
8.968992357607931, 0.17 +-6.111252126574982, 8.690874401014298, 0.18 +-6.095234932436142, 8.419069822411984, 0.17 +-6.083907731925137, 8.149152107071131, 0.17 +-6.060171733202878, 7.755021872464567, 0.19 +-6.049237527768128, 7.096905063837767, 0.18 +-6.032837463484611, 6.831930467393249, 0.16 +-6.025043771602213, 6.585312901996076, 0.17 +-6.014293004584033, 6.342917788773775, 0.18 +-6.008058929757681, 6.103700836189091, 0.17 +-5.999913903942797, 5.860219873487949, 0.18 +-5.983473622589372, 5.200763284228742, 0.18 +-5.982503050297964, 4.953280556481332, 0.21 +-5.990941184631083, 4.649180646520108, 0.17 +-5.990375204302836, 4.183222070336342, 0.19 +-6.019994155445602, 3.7171518155373633, 0.17 +-6.027824205812067, 3.4082449730485678, 0.18 +-6.0477328937267885, 3.110152824781835, 0.17 +-6.06179206300294, 2.8129341518506408, 0.17 +-6.077425037568901, 2.521454067900777, 0.17 +-6.1049633483053185, 2.242079977877438, 0.17 +-6.127303571149241, 1.8302442184649408, 0.17 +-6.148689898371231, 1.5491554676555097, 0.18 +-6.168582712823991, 1.2719322978518903, 0.17 +-6.18979416473303, 0.9923195131123066, 0.18 +-6.205935905280057, 0.7172805098816752, 0.14 +-6.220681760751177, 0.4536799010820687, 0.15 +-6.23177575081354, 0.19965160032734275, 0.16 +-6.234200189646799, -0.04632468428462744, 0.17 +-6.226472593843937, -0.28167605539783835, 0.17 +-6.2231546033290215, -0.5203140652738512, 0.18 +-6.2245912424405105, -0.7646366409026086, 0.17 +-6.226796769245993, -1.0229885149747133, 0.11 +-6.228753546136431, -1.3092295587994158, 0.11 +-6.235858633473981, -1.5712332543917, 0.16 +-6.241867145581637, -1.8188410829752684, 0.17 +-6.247505407140125, -2.0610962817445397, 0.17 +-6.26160208293004, -2.305680143646896, 0.16 +-6.271030831092503, -2.554736233782023, 0.16 +-6.276966826932039, -2.7938492889516056, 0.18 +-6.286932715040166, -3.029788340907544, 0.2 +-6.305014460871462, -3.4180211033672094, 0.19 +-6.318147216457874, -3.701765866484493, 0.18 +-6.331692788458895, -3.9958493197336793, 0.18 
+-6.345709400018677, -4.2880819295533, 0.2 +-6.364543029514607, -4.745379496365786, 0.2 +-6.376225057931151, -5.068289315328002, 0.2 +-6.386946090089623, -5.3926847986876965, 0.21 +-6.395901210664306, -5.7383338352665305, 0.2 +-6.403380268428009, -6.1011709361337125, 0.2 +-6.416631709085777, -6.474855022039264, 0.21 +-6.434793751104735, -6.853483611252159, 0.22 +-6.448680644913111, -7.054484212305397, 0.21 +-6.467507634719368, -7.453976237680763, 0.21 +-6.477391435939353, -7.661290214397013, 0.21 +-6.486641930707265, -7.870456422679126, 0.2 +-6.495588385616429, -8.082026096526533, 0.2 +-6.524012937385123, -8.503228714689612, 0.22 +-6.5402425257489085, -8.712520023807883, 0.19 +-6.551985369238537, -8.9284982024692, 0.19 +-6.569215367722791, -9.14092057198286, 0.2 +-6.586892755993176, -9.34872206300497, 0.2 +-6.60186992114177, -9.563379854895175, 0.19 +-6.631458728807047, -9.987896815408021, 0.21 +-6.66829125926597, -10.405518687330186, 0.21 +-6.706335340102669, -10.83199497172609, 0.19 +-6.727308536821511, -11.045678162947297, 0.19 +-6.772747574665118, -11.463965290691704, 0.19 +-6.790943300409708, -11.675456004682928, 0.19 +-6.810922395961825, -11.88674680935219, 0.19 +-6.825367045763414, -12.096053357236087, 0.19 +-6.84090342768468, -12.300179475452751, 0.2 +-6.845361111394595, -12.70828955899924, 0.19 +-6.8295608789194375, -12.910467007663101, 0.19 +-6.805793757550418, -13.110865643247962, 0.2 +-6.778116438537836, -13.309450836852193, 0.19 +-6.721929965831805, -13.704227453097701, 0.22 +-6.677289731043857, -14.101860418450087, 0.18 +-6.655346964776982, -14.306860540062189, 0.2 +-6.6208854574360885, -14.699235502164811, 0.19 +-6.602681154443417, -14.904942438937724, 0.2 +-6.586696574522648, -15.104535905178636, 0.19 +-6.572500093141571, -15.304483472369611, 0.19 +-6.559116529591847, -15.504608781542629, 0.18 +-6.5455659635481425, -15.70418146904558, 0.19 +-6.522173349803779, -16.10052494890988, 0.2 +-6.501548283093143, -16.687991101294756, 0.2 +-6.486073686159216, 
-17.08038865402341, 0.19 +-6.440251832071226, -17.472675912082195, 0.19 +-6.36428901553154, -17.850999934598804, 0.19 +-6.262275043467525, -18.219204135239124, 0.18 +-6.1392165693687275, -18.574475537519902, 0.2 +-5.995439703168813, -18.916996038053185, 0.2 +-5.82680745475227, -19.256404439453036, 0.2 +-5.639526880986523, -19.578427204396576, 0.19 +-5.435757920437027, -19.888955731876194, 0.19 +-5.210173988540191, -20.172337487339973, 0.19 +-4.977268960385118, -20.43454325897619, 0.2 +-4.730871926352847, -20.675071679521352, 0.2 +-4.467291708278935, -20.89875599881634, 0.2 +-4.1789457486593165, -21.091809764504433, 0.2 +-3.869326696614735, -21.245153723284602, 0.2 +-3.532948974694591, -21.36252835020423, 0.19 +-3.1829956967849284, -21.448438162449747, 0.21 +-2.8091336390934885, -21.49004360055551, 0.2 +-2.4208278381847776, -21.47452077968046, 0.2 +-2.0313652566983365, -21.380517796613276, 0.2 +-1.8465069359517656, -21.30194366723299, 0.2 +-1.5211410725605674, -21.074422171339393, 0.2 +-1.3832566144992597, -20.92859201366082, 0.19 +-1.1700898366398178, -20.594474390614778, 0.19 +-1.050913849612698, -20.225942220538855, 0.19 +-1.0023386162356474, -19.85846951743588, 0.2 +-0.9697019049199298, -19.495108449365944, 0.2 +-0.9172337215277366, -19.10883137024939, 0.21 +-0.8606301248073578, -18.73237776197493, 0.21 +-0.7884204352158122, -18.352733087725937, 0.2 +-0.7270369815523736, -17.967640206217766, 0.2 +-0.6831958081456833, -17.584429083857685, 0.19 +-0.6613254428957589, -17.20860238838941, 0.2 +-0.6603923634975217, -16.83315101219341, 0.2 +-0.6465109558193944, -16.461688498500735, 0.21 +-0.6254944946267642, -16.09084093850106, 0.21 +-0.5970987328910269, -15.711006932891905, 0.2 +-0.5658231334527954, -15.335999253671616, 0.2 +-0.53315547487, -14.778906599152833, 0.19 +-0.5095244732801802, -14.234849255532026, 0.21 +-0.5074288263567723, -13.875290488824248, 0.19 +-0.5041894205496646, -13.514818074181676, 0.21 +-0.5201187747879885, -12.98556698486209, 0.21 
+-0.5449145335587673, -12.449406960979104, 0.2 +-0.5511244894587435, -12.093286258168519, 0.2 +-0.5525593199417926, -11.729080291930586, 0.2 +-0.5538571049110033, -11.367830416187644, 0.2 +-0.5460127148544416, -11.000194695778191, 0.2 +-0.5372593509964645, -10.640138713642955, 0.2 +-0.5218230555183254, -10.276630812324584, 0.18 +-0.5084112170734443, -9.922155069652945, 0.12 +-0.487563535629306, -9.5900893737562, 0.14 +-0.47043087967904285, -9.294005033560097, 0.18 +-0.44222241727402434, -8.879079768434167, 0.17 +-0.4225362437427975, -8.62604822870344, 0.17 +-0.39984264987288043, -8.381907118484378, 0.17 +-0.3839407638297416, -8.153960082214326, 0.17 +-0.3676176664303057, -7.819639571942389, 0.18 +-0.35717150644632056, -7.603465637657791, 0.18 +-0.3537375403684564, -7.391294858884066, 0.18 +-0.3571330072591081, -7.180727923288941, 0.18 +-0.3589121018303558, -6.970913514494896, 0.17 +-0.3638231527293101, -6.766797524876893, 0.19 +-0.3713273367029615, -6.562290033791214, 0.19 +-0.38785169285256416, -6.235815151128918, 0.19 +-0.3994927635649219, -6.00264657381922, 0.18 +-0.4094317204435356, -5.646124672610313, 0.19 +-0.41835511795943603, -5.398758745286614, 0.19 +-0.420521495048888, -5.139261462260038, 0.19 +-0.4209439161350019, -4.884580806829035, 0.18 +-0.41695042565697804, -4.625874835532159, 0.18 +-0.4130515170400031, -4.378249000757933, 0.18 +-0.40592302032746375, -4.132497558835894, 0.2 +-0.39784771675476804, -3.8898938009515405, 0.18 +-0.3842253560433164, -3.643828428350389, 0.16 +-0.3669228610233404, -3.4038889915682375, 0.16 +-0.34668704052455723, -3.181519421748817, 0.16 +-0.3303708662861027, -2.962071313057095, 0.17 +-0.31298660242464393, -2.750573665369302, 0.16 +-0.30068939056945965, -2.545681159477681, 0.17 +-0.2819177901255898, -2.152652540244162, 0.16 +-0.2668647714308463, -1.8720637680962682, 0.16 +-0.2542174984118901, -1.6071524657309055, 0.17 +-0.23450717522064224, -1.2562394505366683, 0.18 +-0.2238464216934517, -0.976905960123986, 0.17 
+-0.20772531983675435, -0.704267649911344, 0.18 +-0.18620157655095682, -0.33102354081347585, 0.16 +-0.1767618251615204, -0.06165247969329357, -0.0 +-0.16035895358072594, 0.16795242298394442, -0.01 diff --git a/donkeycar/utilities/TrackSpeedPlanner/static/index.html b/donkeycar/utilities/TrackSpeedPlanner/static/index.html new file mode 100644 index 000000000..3ef29ed4b --- /dev/null +++ b/donkeycar/utilities/TrackSpeedPlanner/static/index.html @@ -0,0 +1,734 @@ + + +
+ + +