diff --git a/.gitignore b/.gitignore index 5694eb9e..76b73536 100644 --- a/.gitignore +++ b/.gitignore @@ -70,3 +70,4 @@ MLPR.py docs/Makefile sifibridge-* *.pyc +*.model \ No newline at end of file diff --git a/docs/source/documentation/prediction/discrete_classification_doc.md b/docs/source/documentation/prediction/discrete_classification_doc.md new file mode 100644 index 00000000..1f66ec6d --- /dev/null +++ b/docs/source/documentation/prediction/discrete_classification_doc.md @@ -0,0 +1,135 @@ +# Discrete Classifiers + +Unlike continuous classifiers that output a prediction for every window of EMG data, discrete classifiers are designed for recognizing transient, isolated gestures. These classifiers operate on variable-length templates (sequences of windows) and are well-suited for detecting distinct movements like finger snaps, taps, or quick hand gestures. + +Discrete classifiers expect input data in a different format than continuous classifiers: +- **Continuous classifiers**: Operate on individual windows of shape `(n_windows, n_features)`. +- **Discrete classifiers**: Operate on templates (sequences of windows) where each template has shape `(n_frames, n_features)` and can vary in length. 
+ +To prepare data for discrete classifiers, use the `discrete=True` parameter when calling `parse_windows()` on your `OfflineDataHandler`: + +```Python +from libemg.data_handler import OfflineDataHandler + +odh = OfflineDataHandler() +odh.get_data('./data/', regex_filters) +windows, metadata = odh.parse_windows(window_size=50, window_increment=10, discrete=True) +# windows is now a list of templates, one per file/rep +``` + +For feature extraction with discrete data, use the `discrete=True` parameter: + +```Python +from libemg.feature_extractor import FeatureExtractor + +fe = FeatureExtractor() +features = fe.extract_features(['MAV', 'ZC', 'SSC', 'WL'], windows, discrete=True, array=True) +# features is a list of arrays, one per template +``` + +## Majority Vote LDA (MVLDA) + +A classifier that applies Linear Discriminant Analysis (LDA) to each frame within a template and uses majority voting to determine the final prediction. This approach is simple yet effective for discrete gesture recognition. + +```Python +from libemg._discrete_models import MVLDA + +model = MVLDA() +model.fit(train_features, train_labels) +predictions = model.predict(test_features) +probabilities = model.predict_proba(test_features) +``` + +## Dynamic Time Warping Classifier (DTWClassifier) + +A template-matching classifier that uses Dynamic Time Warping (DTW) distance to compare test samples against stored training templates. DTW is particularly useful when gestures may vary in speed or duration, as it can align sequences with different temporal characteristics. + +```Python +from libemg._discrete_models import DTWClassifier + +model = DTWClassifier(n_neighbors=3) +model.fit(train_features, train_labels) +predictions = model.predict(test_features) +probabilities = model.predict_proba(test_features) +``` + +The `n_neighbors` parameter controls how many nearest templates are used for voting (k-nearest neighbors with DTW distance). 
+ +## Pretrained Myo Cross-User Model (MyoCrossUserPretrained) + +A pretrained deep learning model for cross-user discrete gesture recognition using the Myo armband. This model uses a convolutional-recurrent architecture and recognizes 6 gestures: Nothing, Close, Flexion, Extension, Open, and Pinch. + +```Python +from libemg._discrete_models import MyoCrossUserPretrained + +model = MyoCrossUserPretrained() +# Model is automatically downloaded on first use + +# The model provides recommended parameters for OnlineDiscreteClassifier +print(model.args) +# {'window_size': 10, 'window_increment': 5, 'null_label': 0, ...} + +predictions = model.predict(test_data) +probabilities = model.predict_proba(test_data) +``` + +This model expects raw windowed EMG data (not extracted features) with shape `(batch_size, seq_len, n_channels, n_samples)`. + +## Online Discrete Classification + +For real-time discrete gesture recognition, use the `OnlineDiscreteClassifier`: + +```Python +from libemg.emg_predictor import OnlineDiscreteClassifier +from libemg._discrete_models import MyoCrossUserPretrained + +# Load pretrained model +model = MyoCrossUserPretrained() + +# Create online classifier +classifier = OnlineDiscreteClassifier( + odh=online_data_handler, + model=model, + window_size=model.args['window_size'], + window_increment=model.args['window_increment'], + null_label=model.args['null_label'], + feature_list=model.args['feature_list'], # None for raw data + template_size=model.args['template_size'], + min_template_size=model.args['min_template_size'], + gesture_mapping=model.args['gesture_mapping'], + buffer_size=model.args['buffer_size'], + rejection_threshold=0.5, + debug=True +) + +# Start recognition loop +classifier.run() +``` + +## Creating Custom Discrete Classifiers + +Any custom discrete classifier should implement the following methods to work with LibEMG: + +- `fit(x, y)`: Train the model where `x` is a list of templates and `y` is the corresponding labels. 
+- `predict(x)`: Return predicted class labels for a list of templates. +- `predict_proba(x)`: Return predicted class probabilities for a list of templates. + +```Python +class CustomDiscreteClassifier: + def __init__(self): + self.classes_ = None + + def fit(self, x, y): + # x: list of templates (each template is an array of frames) + # y: labels for each template + self.classes_ = np.unique(y) + # ... training logic + + def predict(self, x): + # Return array of predictions + pass + + def predict_proba(self, x): + # Return array of shape (n_samples, n_classes) + pass +``` \ No newline at end of file diff --git a/docs/source/documentation/prediction/prediction.rst b/docs/source/documentation/prediction/prediction.rst index f15dfc84..ab3afc99 100644 --- a/docs/source/documentation/prediction/prediction.rst +++ b/docs/source/documentation/prediction/prediction.rst @@ -6,6 +6,9 @@ EMG Prediction .. include:: classification_doc.md :parser: myst_parser.sphinx_ +.. include:: discrete_classification_doc.md + :parser: myst_parser.sphinx_ + .. include:: regression_doc.md :parser: myst_parser.sphinx_ diff --git a/docs/source/documentation/prediction/predictors.md b/docs/source/documentation/prediction/predictors.md index e9e1246e..24629341 100644 --- a/docs/source/documentation/prediction/predictors.md +++ b/docs/source/documentation/prediction/predictors.md @@ -2,7 +2,7 @@ After recording, processing, and extracting features from a window of EMG data, it is passed to a machine learning algorithm for prediction. These control systems have evolved in the prosthetics community for continuously predicting muscular contractions for enabling prosthesis control. Therefore, they are primarily limited to recognizing static contractions (e.g., hand open/close and wrist flexion/extension) as they have no temporal awareness. Currently, this is the form of recognition supported by LibEMG and is an initial step to explore EMG as an interaction opportunity for general-purpose use. 
This section highlights the machine-learning strategies that are part of `LibEMG`'s pipeline. -There are two types of models supported in `LibEMG`: classifiers and regressors. Classifiers output a discrete motion class for each window, whereas regressors output a continuous prediction along a degree of freedom. For both classifiers and regressors, `LibEMG` supports statistical models as well as deep learning models. Additionally, a number of post-processing methods (i.e., techniques to improve performance after prediction) are supported for all models. +There are three types of models supported in `LibEMG`: classifiers, regressors, and discrete classifiers. Classifiers output a motion class for each window of EMG data, whereas regressors output a continuous prediction along a degree of freedom. Discrete classifiers are designed for recognizing transient, isolated gestures and operate on variable-length templates rather than individual windows. For classifiers and regressors, `LibEMG` supports statistical models as well as deep learning models. Additionally, a number of post-processing methods (i.e., techniques to improve performance after prediction) are supported for all models. ## Statistical Models diff --git a/libemg/__init__.py b/libemg/__init__.py index 675cd197..2154b5a7 100644 --- a/libemg/__init__.py +++ b/libemg/__init__.py @@ -11,3 +11,4 @@ from libemg import gui from libemg import shared_memory_manager from libemg import environments +from libemg import _discrete_models diff --git a/libemg/_discrete_models/DTW.py b/libemg/_discrete_models/DTW.py new file mode 100644 index 00000000..180a0f15 --- /dev/null +++ b/libemg/_discrete_models/DTW.py @@ -0,0 +1,126 @@ +from tslearn.metrics import dtw_path +import numpy as np + + +class DTWClassifier: + """Dynamic Time Warping k-Nearest Neighbors classifier. + + A classifier that uses Dynamic Time Warping (DTW) distance for template + matching with k-nearest neighbors. 
Suitable for discrete gesture recognition + where temporal alignment between samples varies. + + Parameters + ---------- + n_neighbors: int, default=1 + Number of neighbors to use for k-nearest neighbors voting. + + Attributes + ---------- + templates: list of ndarray + The training templates stored after fitting. + labels: ndarray + The labels corresponding to each template. + classes_: ndarray + The unique class labels known to the classifier. + """ + + def __init__(self, n_neighbors=1): + """Initialize the DTW classifier. + + Parameters + ---------- + n_neighbors: int, default=1 + Number of neighbors to use for k-nearest neighbors voting. + """ + self.n_neighbors = n_neighbors + self.templates = None + self.labels = None + self.classes_ = None + + def fit(self, features, labels): + """Fit the DTW classifier by storing training templates. + + Parameters + ---------- + features: list of ndarray + A list of training samples (templates) where each sample is + a 2D array of shape (n_frames, n_features). + labels: array-like + The target labels for each template. + """ + self.templates = features + self.labels = np.array(labels) + self.classes_ = np.unique(labels) + + def predict(self, samples): + """Predict class labels for samples. + + Parameters + ---------- + samples: list of ndarray + A list of samples to classify where each sample is a 2D array + of shape (n_frames, n_features). + + Returns + ------- + ndarray + Predicted class labels for each sample. + """ + # We can reuse predict_proba logic to get the class with highest probability + probas = self.predict_proba(samples) + return self.classes_[np.argmax(probas, axis=1)] + + def predict_proba(self, samples, gamma=None, eps=1e-12): + """Predict class probabilities using DTW distance-weighted voting. + + Computes DTW distances to all templates, selects k-nearest neighbors, + and computes class probabilities using exponentially weighted voting. 
+ + Parameters + ---------- + samples: list of ndarray + A list of samples to classify where each sample is a 2D array + of shape (n_frames, n_features). + gamma: float, default=None + The kernel bandwidth for distance weighting. If None, automatically + computed based on median neighbor distance. + eps: float, default=1e-12 + Small constant to prevent division by zero. + + Returns + ------- + ndarray + Predicted class probabilities of shape (n_samples, n_classes). + """ + if self.templates is None: + raise ValueError("Call fit() before predict_proba().") + + X = np.asarray(samples, dtype=object) + out = np.zeros((len(X), len(self.classes_)), dtype=float) + + for i, s in enumerate(X): + # DTW distances to templates + dists = np.array([dtw_path(t, s)[1] for t in self.templates], dtype=float) + + # kNN + nn_idx = np.argsort(dists)[:self.n_neighbors] + nn_dists = dists[nn_idx] + nn_labels = self.labels[nn_idx] + + # choose gamma if not provided (scale to typical distance) + g = gamma + if g is None: + scale = np.median(nn_dists) if len(nn_dists) else 1.0 + g = 1.0 / max(scale, eps) + + weights = np.exp(-g * nn_dists) # closer -> bigger weight + + # accumulate per class + for cls_j, cls in enumerate(self.classes_): + out[i, cls_j] = weights[nn_labels == cls].sum() + + # normalize to probabilities + z = out[i].sum() + out[i] = out[i] / max(z, eps) + + return out diff --git a/libemg/_discrete_models/MVLDA.py b/libemg/_discrete_models/MVLDA.py new file mode 100644 index 00000000..fa86a541 --- /dev/null +++ b/libemg/_discrete_models/MVLDA.py @@ -0,0 +1,95 @@ +from sklearn.discriminant_analysis import LinearDiscriminantAnalysis +import numpy as np +from scipy import stats + + +class MVLDA: + """Majority Vote Linear Discriminant Analysis classifier. + + A classifier that uses Linear Discriminant Analysis (LDA) on individual frames + and aggregates predictions using majority voting. 
This is designed for discrete + gesture recognition where each sample contains multiple frames. + + Attributes + ---------- + model: LinearDiscriminantAnalysis + The underlying LDA model. + classes_: ndarray + The class labels known to the classifier. + """ + + def __init__(self): + """Initialize the MVLDA classifier.""" + self.model = None + self.classes_ = None + + def fit(self, x, y): + """Fit the MVLDA classifier on training data. + + Parameters + ---------- + x: list of ndarray + A list of samples where each sample is a 2D array of shape + (n_frames, n_features). Each sample can have a different number of frames. + y: array-like + The target labels for each sample. + """ + self.model = LinearDiscriminantAnalysis() + # Create a flat array of labels corresponding to every frame in x + labels = np.hstack([[v] * x[i].shape[0] for i, v in enumerate(y)]) + self.model.fit(np.vstack(x), labels) + # Store classes for consistent probability mapping + self.classes_ = self.model.classes_ + + def predict(self, y): + """Predict class labels using majority voting. + + Performs frame-level LDA predictions and returns the majority vote + for each sample. + + Parameters + ---------- + y: list of ndarray + A list of samples where each sample is a 2D array of shape + (n_frames, n_features). + + Returns + ------- + ndarray + Predicted class labels for each sample. + """ + preds = [] + for s in y: + frame_predictions = self.model.predict(s) + # Majority vote on the labels + majority_vote = stats.mode(frame_predictions, keepdims=False)[0] + preds.append(majority_vote) + return np.array(preds) + + def predict_proba(self, y): + """Predict class probabilities using soft voting. + + Calculates probabilities by averaging the frame-level probabilities + for each sample (soft voting). + + Parameters + ---------- + y: list of ndarray + A list of samples where each sample is a 2D array of shape + (n_frames, n_features). 
+ + Returns + ------- + ndarray + Predicted class probabilities of shape (n_samples, n_classes). + """ + probas = [] + for s in y: + # Get probabilities for each frame: shape (n_frames, n_classes) + frame_probas = self.model.predict_proba(s) + + # Average probabilities across all frames in this sample + sample_proba = np.mean(frame_probas, axis=0) + probas.append(sample_proba) + + return np.array(probas) \ No newline at end of file diff --git a/libemg/_discrete_models/MyoCrossUser.py b/libemg/_discrete_models/MyoCrossUser.py new file mode 100644 index 00000000..a6953bff --- /dev/null +++ b/libemg/_discrete_models/MyoCrossUser.py @@ -0,0 +1,306 @@ +import torch +import torch.nn as nn +import numpy as np +import os +import urllib.request + +MODEL_URL = "https://github.com/eeddy/DiscreteMCI/raw/main/Other/Discrete.model" +DEFAULT_MODEL_PATH = os.path.join("./Discrete.model") + + +class DiscreteClassifier(nn.Module): + """Convolutional-Recurrent neural network for discrete gesture classification. + + A deep learning model architecture combining convolutional layers for spatial + feature extraction, recurrent layers (GRU) for temporal modeling, and MLP + layers for classification. This architecture is required for torch.load to + deserialize pretrained models. + + Parameters + ---------- + emg_size: tuple + The shape of input EMG data as (sequence_length, n_channels, n_samples). + file_name: str, default=None + Optional filename for saving/loading the model. + temporal_hidden_size: int, default=128 + Hidden size for the GRU temporal layers. + temporal_layers: int, default=3 + Number of stacked GRU layers. + mlp_layers: list of int, default=[128, 64, 32] + Sizes of the MLP hidden layers. + n_classes: int, default=6 + Number of output classes. + type: str, default='GRU' + Type of recurrent layer (currently only GRU is implemented). + conv_kernel_sizes: list of int, default=[3, 3, 3] + Kernel sizes for each convolutional layer. 
+ conv_out_channels: list of int, default=[16, 32, 64] + Number of output channels for each convolutional layer. + """ + + def __init__(self, emg_size, file_name=None, temporal_hidden_size=128, temporal_layers=3, + mlp_layers=[128, 64, 32], n_classes=6, type='GRU', + conv_kernel_sizes=[3, 3, 3], conv_out_channels=[16, 32, 64]): + super().__init__() + + self.file_name = file_name + self.log = {} + self.min_loss = 0 + + dropout = 0.2 + + self.conv_layers = nn.ModuleList() + in_channels = emg_size[1] + for i in range(len(conv_out_channels)): + self.conv_layers.append(nn.Conv1d(in_channels=in_channels, out_channels=conv_out_channels[i], + kernel_size=conv_kernel_sizes[i], padding='same')) + self.conv_layers.append(nn.BatchNorm1d(conv_out_channels[i])) + self.conv_layers.append(nn.ReLU()) + self.conv_layers.append(nn.MaxPool1d(kernel_size=2)) + self.conv_layers.append(nn.Dropout(dropout)) + in_channels = conv_out_channels[i] + + spoof_emg_input = torch.zeros((1, *emg_size)) + conv_out = self.forward_conv(spoof_emg_input) + conv_out_size = conv_out.shape[-1] + + self.temporal = nn.GRU(conv_out_size, temporal_hidden_size, num_layers=temporal_layers, + batch_first=True, dropout=dropout) + + emg_output_shape = self.forward_temporal(conv_out).shape[-1] + + self.initial_layer = nn.Linear(emg_output_shape, mlp_layers[0]) + self.layer1 = nn.Linear(mlp_layers[0], mlp_layers[1]) + self.layer2 = nn.Linear(mlp_layers[1], mlp_layers[2]) + self.output_layer = nn.Linear(mlp_layers[-1], n_classes) + self.relu = nn.ReLU() + + def forward_conv(self, x): + """Apply convolutional layers to input. + + Parameters + ---------- + x: torch.Tensor + Input tensor of shape (batch_size, seq_len, channels, samples). + + Returns + ------- + torch.Tensor + Convolved features of shape (batch_size, seq_len, flattened_features). 
+ """ + batch_size, seq_len, channels, samples = x.shape + x = x.view(batch_size * seq_len, channels, samples) + for layer in self.conv_layers: + x = layer(x) + _, channels_out, samples_out = x.shape + x = x.view(batch_size, seq_len, channels_out * samples_out) + return x + + def forward_temporal(self, emg, lengths=None): + """Apply temporal (GRU) layers to convolutional features. + + Parameters + ---------- + emg: torch.Tensor + Input tensor from convolutional layers. + lengths: array-like, default=None + Optional sequence lengths for variable-length inputs. + + Returns + ------- + torch.Tensor + Temporal features from the last time step. + """ + out, _ = self.temporal(emg) + if lengths is not None: + out = torch.stack([s[lengths[i]-1] for i, s in enumerate(out)]) + else: + out = out[:, -1, :] + return out + + def forward_mlp(self, x): + """Apply MLP classification layers. + + Parameters + ---------- + x: torch.Tensor + Input features from temporal layers. + + Returns + ------- + torch.Tensor + Output logits of shape (batch_size, n_classes). + """ + out = self.initial_layer(x) + out = self.relu(out) + out = self.layer1(out) + out = self.relu(out) + out = self.layer2(out) + out = self.relu(out) + out = self.output_layer(out) + return out + + def forward_once(self, emg, emg_len=None): + """Complete forward pass through the network. + + Parameters + ---------- + emg: torch.Tensor + Input EMG tensor of shape (batch_size, seq_len, channels, samples). + emg_len: array-like, default=None + Optional sequence lengths for variable-length inputs. + + Returns + ------- + torch.Tensor + Output logits of shape (batch_size, n_classes). + """ + out = self.forward_conv(emg) + out = self.forward_temporal(out, emg_len) + out = self.forward_mlp(out) + return out + + def predict(self, x, device='cpu'): + """Predict class labels for input samples. + + Parameters + ---------- + x: ndarray or torch.Tensor + Input EMG data of shape (batch_size, seq_len, channels, samples). 
+ device: str, default='cpu' + Device to run inference on ('cpu' or 'cuda'). + + Returns + ------- + ndarray + Predicted class labels for each sample. + """ + self.to(device) + if not isinstance(x, torch.Tensor): + x = torch.tensor(x, dtype=torch.float32) + preds = self.forward_once(x.to(device)) + return np.array([p.argmax().item() for p in preds]) + + def predict_proba(self, x, device='cpu'): + """Predict class probabilities for input samples. + + Parameters + ---------- + x: ndarray or torch.Tensor + Input EMG data of shape (batch_size, seq_len, channels, samples). + device: str, default='cpu' + Device to run inference on ('cpu' or 'cuda'). + + Returns + ------- + ndarray + Predicted class probabilities of shape (batch_size, n_classes). + """ + self.to(device) + if not isinstance(x, torch.Tensor): + x = torch.tensor(x, dtype=torch.float32) + logits = self.forward_once(x.to(device)) + probs = torch.softmax(logits, dim=-1) + return probs.detach().cpu().numpy() + + +class MyoCrossUserPretrained: + """Pretrained cross-user model for Myo armband discrete gesture recognition. + + A wrapper class that automatically downloads and loads a pretrained + DiscreteClassifier model trained on Myo armband data for cross-user + gesture recognition. The model recognizes 6 gestures: Nothing, Close, + Flexion, Extension, Open, and Pinch. + + Parameters + ---------- + model_path: str, default=None + Path to save/load the model file. If None, uses './Discrete.model'. + + Attributes + ---------- + model: DiscreteClassifier + The loaded pretrained model. + args: dict + Recommended arguments for use with OnlineDiscreteClassifier, including + window_size, window_increment, null_label, template_size, and gesture_mapping. + """ + + def __init__(self, model_path=None): + """Initialize and load the pretrained Myo cross-user model. + + Parameters + ---------- + model_path: str, default=None + Path to save/load the model file. If None, uses './Discrete.model'. 
+ """ + if model_path is None: + model_path = DEFAULT_MODEL_PATH + self.model_path = model_path + self.model = None + self._ensure_model_downloaded() + self._load_model() + + self.args = { + 'window_size': 10, 'window_increment': 5, 'null_label': 0, 'feature_list': None, 'template_size': 250, 'min_template_size': 150, 'gesture_mapping': ['Nothing', 'Close', 'Flexion', 'Extension', 'Open', 'Pinch'], 'buffer_size': 5, + } + + print("This model has defined args (self.args) which need to be passed into the OnlineDiscreteClassifier.") + + def _ensure_model_downloaded(self): + """Download the pretrained model if not already present.""" + if os.path.exists(self.model_path): + return + + model_dir = os.path.dirname(self.model_path) + if model_dir: + os.makedirs(model_dir, exist_ok=True) + + print(f"Downloading model to {self.model_path}...") + urllib.request.urlretrieve(MODEL_URL, self.model_path) + print("Download complete.") + + def _load_model(self): + """Load the pretrained model from disk.""" + import sys + # Register DiscreteClassifier in sys.modules so torch.load can find it + # (the saved model was pickled with DiscreteClassifier as the module name) + sys.modules['DiscreteClassifier'] = sys.modules[__name__] + + device = 'cuda' if torch.cuda.is_available() else 'cpu' + self.model = torch.load(self.model_path, map_location=device, weights_only=False) + self.model.eval() + + def predict(self, x, device='cpu'): + """Predict class labels for input samples. + + Parameters + ---------- + x: ndarray or torch.Tensor + Input EMG data of shape (batch_size, seq_len, channels, samples). + device: str, default='cpu' + Device to run inference on ('cpu' or 'cuda'). + + Returns + ------- + ndarray + Predicted class labels for each sample. + """ + return self.model.predict(x, device=device) + + def predict_proba(self, x, device='cpu'): + """Predict class probabilities for input samples. 
+ + Parameters + ---------- + x: ndarray or torch.Tensor + Input EMG data of shape (batch_size, seq_len, channels, samples). + device: str, default='cpu' + Device to run inference on ('cpu' or 'cuda'). + + Returns + ------- + ndarray + Predicted class probabilities of shape (batch_size, n_classes). + """ + return self.model.predict_proba(x, device=device) \ No newline at end of file diff --git a/libemg/_discrete_models/__init__.py b/libemg/_discrete_models/__init__.py new file mode 100644 index 00000000..912d1cd6 --- /dev/null +++ b/libemg/_discrete_models/__init__.py @@ -0,0 +1,3 @@ +from libemg._discrete_models.MVLDA import MVLDA +from libemg._discrete_models.DTW import DTWClassifier +from libemg._discrete_models.MyoCrossUser import MyoCrossUserPretrained \ No newline at end of file diff --git a/libemg/_gui/_data_collection_panel.py b/libemg/_gui/_data_collection_panel.py index 2adf0e56..5abb62c2 100644 --- a/libemg/_gui/_data_collection_panel.py +++ b/libemg/_gui/_data_collection_panel.py @@ -22,24 +22,26 @@ def __init__(self, data_folder='data/', rest_time=2, auto_advance=True, + discrete=False, exclude_files=[], gui = None, video_player_width = 720, video_player_height = 480): - + self.num_reps = num_reps self.rep_time = rep_time self.media_folder = media_folder self.data_folder = data_folder self.rest_time = rest_time self.auto_advance=auto_advance + self.discrete = discrete self.exclude_files = exclude_files self.gui = gui self.video_player_width = video_player_width self.video_player_height = video_player_height self.widget_tags = {"configuration":['__dc_configuration_window','__dc_num_reps','__dc_rep_time','__dc_rest_time', '__dc_media_folder',\ - '__dc_auto_advance'], + '__dc_auto_advance', '__dc_discrete'], "collection": ['__dc_collection_window', '__dc_prompt_spacer', '__dc_prompt', '__dc_progress', '__dc_redo_button'], "visualization": ['__vls_visualize_window']} @@ -54,8 +56,16 @@ def spawn_configuration_window(self): 
self.cleanup_window("configuration") self.cleanup_window("collection") self.cleanup_window("visualization") + + # Get UI scale from main GUI (default to 1.0 if not set) + ui_scale = getattr(self.gui, 'ui_scale', 1.0) + input_width = int(100 * ui_scale) + folder_input_width = int(250 * ui_scale) + button_width = int(80 * ui_scale) + button_height = int(30 * ui_scale) + with dpg.window(tag="__dc_configuration_window", label="Data Collection Configuration"): - + dpg.add_text(label="Training Menu") with dpg.table(header_row=False, resizable=True, policy=dpg.mvTable_SizingStretchProp, borders_outerH=True, borders_innerV=True, borders_innerH=True, borders_outerV=True): @@ -64,44 +74,50 @@ def spawn_configuration_window(self): dpg.add_table_column(label="") dpg.add_table_column(label="") # REP ROW - with dpg.table_row(): + with dpg.table_row(): with dpg.group(horizontal=True): dpg.add_text("Num Reps: ") dpg.add_input_text(default_value=self.num_reps, tag="__dc_num_reps", - width=100) + width=input_width) with dpg.group(horizontal=True): dpg.add_text("Time Per Rep") dpg.add_input_text(default_value=self.rep_time, tag="__dc_rep_time", - width=100) + width=input_width) with dpg.group(horizontal=True): dpg.add_text("Time Between Reps") dpg.add_input_text(default_value=self.rest_time, - tag="__dc_rest_time", - width=100) + tag="__dc_rest_time", + width=input_width) # FOLDER ROW with dpg.table_row(): with dpg.group(horizontal=True): dpg.add_text("Media Folder:") - dpg.add_input_text(default_value=self.media_folder, - tag="__dc_media_folder", width=250) + dpg.add_input_text(default_value=self.media_folder, + tag="__dc_media_folder", width=folder_input_width) with dpg.group(horizontal=True): dpg.add_text("Output Folder:") - dpg.add_input_text(default_value=self.data_folder, + dpg.add_input_text(default_value=self.data_folder, tag="__dc_output_folder", - width=250) + width=folder_input_width) # CHECKBOX ROW with dpg.table_row(): with dpg.group(horizontal=True): 
dpg.add_text("Auto-Advance") dpg.add_checkbox(default_value=self.auto_advance, tag="__dc_auto_advance") + with dpg.group(horizontal=True): + dpg.add_text("Discrete Mode (Spacebar)") + dpg.add_checkbox(default_value=self.discrete, + tag="__dc_discrete") # BUTTON ROW with dpg.table_row(): with dpg.group(horizontal=True): - dpg.add_button(label="Start", callback=self.start_callback) - dpg.add_button(label="Visualize", callback=self.visualize_callback) + dpg.add_button(label="Start", callback=self.start_callback, + width=button_width, height=button_height) + dpg.add_button(label="Visualize", callback=self.visualize_callback, + width=button_width, height=button_height) # dpg.set_primary_window("__dc_configuration_window", True) @@ -127,6 +143,7 @@ def get_settings(self): self.media_folder = dpg.get_value("__dc_media_folder") self.output_folder = dpg.get_value("__dc_output_folder") self.auto_advance = bool(dpg.get_value("__dc_auto_advance")) + self.discrete = bool(dpg.get_value("__dc_discrete")) def gather_media(self): # find everything in the media folder @@ -202,6 +219,10 @@ def spawn_collection_window(self, media_list): with dpg.group(horizontal=True): dpg.add_spacer(height=20,width=30) dpg.add_progress_bar(tag="__dc_progress", default_value=0.0,width=self.video_player_width) + if self.discrete: + with dpg.group(horizontal=True): + dpg.add_spacer(tag="__dc_discrete_spacer", height=20, width=self.video_player_width/2+30 - (7*len("Hold SPACEBAR to record"))/2) + dpg.add_text("Hold SPACEBAR to record", tag="__dc_discrete_hint", color=(255, 200, 0)) with dpg.group(horizontal=True): dpg.add_spacer(tag="__dc_redo_spacer", height=20, width=self.video_player_width/2+30 - (7*len("Redo"))/2) dpg.add_button(tag="__dc_redo_button", label="Redo", callback=self.redo_collection_callback) @@ -229,14 +250,17 @@ def run_sgt(self, media_list): while self.i < len(media_list): self.rep_buffer = {mod:[] for mod in self.gui.online_data_handler.modalities} self.rep_count = {mod:0 for mod in 
    def play_collection_visual_discrete(self, media):
        """Record a single discrete gesture rep gated by the spacebar.

        Shows the gesture prompt with a grayscale preview while idle, then
        records (and plays the media in colour) for exactly as long as the
        spacebar is held. Data received before the press is discarded via a
        data-handler reset so the rep contains only the gesture itself.

        Parameters
        ----------
        media: tuple
            Collection media entry; media[0] is the playable media object
            (exposes fps, advance_to and get_dpg_formatted_texture) and
            media[1] is the gesture prompt text — TODO confirm remaining
            tuple fields against run_sgt.
        """
        # Display gesture name and grayscale image (waiting state)
        dpg.set_value("__dc_prompt", value=media[1])
        dpg.set_item_width("__dc_prompt_spacer", width=self.video_player_width/2+30 - (7*len(media[1]))/2)

        texture = media[0].get_dpg_formatted_texture(width=self.video_player_width, height=self.video_player_height, grayscale=True)
        set_texture("__dc_collection_visual", texture, self.video_player_width, self.video_player_height)
        dpg.set_value("__dc_progress", value=0.0)

        # Poll every 50 ms until the spacebar is pressed
        while not dpg.is_key_down(dpg.mvKey_Spacebar):
            time.sleep(0.05)

        # Reset data handler and counters right when spacebar is pressed.
        # This ensures we only capture data from when recording actually starts.
        self.gui.online_data_handler.reset()
        self.rep_buffer = {mod: [] for mod in self.gui.online_data_handler.modalities}
        self.rep_count = {mod: 0 for mod in self.gui.online_data_handler.modalities}

        # Start recording - show in color
        motion_timer = time.perf_counter_ns()

        # Record while spacebar held; sleep one media frame per iteration
        # (assumes media[0].fps is non-zero — TODO confirm for still images)
        while dpg.is_key_down(dpg.mvKey_Spacebar):
            time.sleep(1/media[0].fps)
            elapsed = (time.perf_counter_ns() - motion_timer) / 1e9

            # Advance the visual to the frame matching the elapsed recording time
            media[0].advance_to(elapsed)
            texture = media[0].get_dpg_formatted_texture(width=self.video_player_width, height=self.video_player_height, grayscale=False)
            set_texture("__dc_collection_visual", texture, self.video_player_width, self.video_player_height)

            # Update progress bar (scale to 10s for display purposes)
            dpg.set_value("__dc_progress", value=min(1.0, elapsed / 10.0))

            # Collect EMG data: count[mod][0][0] is a running sample total, so
            # slice off only the samples new since the previous iteration.
            # NOTE(review): prepending vals[mod][:new_samples] implies get_data
            # returns newest-first — confirm against OnlineDataHandler.
            vals, count = self.gui.online_data_handler.get_data()
            for mod in self.gui.online_data_handler.modalities:
                new_samples = count[mod][0][0] - self.rep_count[mod]
                self.rep_buffer[mod] = [vals[mod][:new_samples, :]] + self.rep_buffer[mod]
                self.rep_count[mod] = self.rep_count[mod] + new_samples
    def parse_windows(self, window_size, window_increment, metadata_operations=None, discrete=False):
        """Parses windows based on the acquired data from the get_data function.

        Parameters
        ----------
        window_size: int
            The number of samples in a window.
        window_increment: int
            The number of samples that advances before next window.
        metadata_operations: dict or None (optional), default=None
            Specifies which operations should be performed on metadata attributes when performing windowing.
            By default, all metadata is stored as its mode in a window. To change this behaviour, specify the
            metadata attribute as the key and the operation as the value in the dictionary. The operation
            (value) should either be an accepted string (mean, median, last_sample) or a function handle that
            takes in an ndarray of size (window_size, ) and returns a single value to represent the metadata
            for that window. Passing in a string will map from that string to the specified operation. The
            windowing of only the attributes specified in this dictionary will be modified - all other
            attributes will default to the mode. If None, all attributes default to the mode. Defaults to None.
        discrete: bool (optional), default=False
            If True, keeps windows from each file/rep separate instead of concatenating them.
            Useful for discrete gesture recognition where each rep should be treated independently.

        Returns
        ----------
        If discrete=False (default):
            np.ndarray
                A np.ndarray of size windows x channels x samples.
            dict
                A dictionary containing np.ndarrays for each metadata tag of the dataset. Each window will
                have an associated value for each metadata. Therefore, the dimensions of the metadata should
                be Wx1 for each field.

        If discrete=True:
            list
                A list of np.ndarrays, one per file/rep. Each array has shape (num_windows, channels, samples).
            dict
                A dictionary containing np.ndarrays for each metadata tag. Each template/rep will have one
                associated value for each metadata (the mode across windows). Dimensions are Tx1 where T is
                the number of templates.
        """
        # Thin public wrapper; all windowing logic lives in _parse_windows_helper.
        return self._parse_windows_helper(window_size, window_increment, metadata_operations, discrete)
class OnlineDiscreteClassifier:
    """OnlineDiscreteClassifier.

    Real-time discrete gesture classifier that detects individual gestures from EMG data.
    Unlike continuous classifiers, this classifier is designed for detecting discrete,
    transient gestures and outputs a prediction only when a gesture is detected.

    Parameters
    ----------
    odh: OnlineDataHandler
        An online data handler object for streaming EMG data.
    model: object
        A trained model with a predict_proba method (e.g., from libemg discrete models).
    window_size: int
        The number of samples in a window.
    window_increment: int
        The number of samples that advances before the next window.
    null_label: int
        The label corresponding to the null/no gesture class.
    feature_list: list or None
        A list of features that will be extracted during real-time classification.
        Pass in None if the model expects raw windowed data.
    template_size: int
        The maximum number of samples to use for gesture template matching.
    min_template_size: int, default=None
        The minimum number of samples required before attempting classification.
        If None, defaults to template_size.
    key_mapping: dict, default=None
        A dictionary mapping gesture names to keyboard keys for automated key presses.
        Requires pyautogui to be installed and gesture_mapping to be provided.
    feature_dic: dict or None, default=None
        A dictionary containing feature extraction parameters. None is treated as an
        empty dictionary.
    gesture_mapping: dict, default=None
        A dictionary mapping class indices to gesture names for debug output.
    rejection_threshold: float, default=0.0
        The confidence threshold (0-1). Predictions with confidence below this
        threshold will be rejected and treated as null gestures.
    debug: bool, default=True
        If True, prints accepted gestures with timestamps and confidence values.
    buffer_size: int, default=1
        Number of successive predictions to buffer before accepting a gesture.
        When buffer_size > 1, the mode (most frequent prediction) across the buffer
        is used to determine the final prediction. This helps filter noisy predictions.
    """

    def __init__(
        self,
        odh,
        model,
        window_size,
        window_increment,
        null_label,
        feature_list,
        template_size,
        min_template_size=None,
        key_mapping=None,
        feature_dic=None,
        gesture_mapping=None,
        rejection_threshold=0.0,
        debug=True,
        buffer_size=1
    ):
        self.odh = odh
        self.window_size = window_size
        self.window_increment = window_increment
        self.feature_list = feature_list
        self.model = model
        self.null_label = null_label
        self.template_size = template_size
        self.min_template_size = min_template_size if min_template_size is not None else template_size
        self.key_mapping = key_mapping
        # Default to a fresh dict per instance to avoid the shared
        # mutable-default-argument pitfall.
        self.feature_dic = {} if feature_dic is None else feature_dic
        self.gesture_mapping = gesture_mapping
        self.rejection_threshold = rejection_threshold
        self.debug = debug
        self.buffer_size = buffer_size
        self.prediction_buffer = deque(maxlen=buffer_size)
        self.fe = FeatureExtractor()

    def run(self):
        """Run the main gesture detection loop.

        Continuously monitors EMG data and detects discrete gestures. Uses predict_proba
        to apply an optional rejection threshold. When buffer_size > 1, takes the mode
        across multiple successive predictions before accepting a gesture.

        The loop runs indefinitely until interrupted. When a gesture is detected and
        accepted (passes rejection threshold and buffer consensus), the data handler
        is reset and the prediction buffer is cleared.
        """
        expected_count = self.min_template_size

        while True:
            # Wait until enough new samples have accumulated to attempt a prediction
            _, counts = self.odh.get_data(self.window_size)
            if counts['emg'][0][0] >= expected_count:
                data, _ = self.odh.get_data(self.template_size)
                # Reverse to chronological order.
                # NOTE(review): presumes get_data returns newest-first — confirm
                # against OnlineDataHandler.
                emg = data['emg'][::-1]
                feats = get_windows(emg, window_size=self.window_size, window_increment=self.window_increment)
                if self.feature_list is not None:
                    feats = self.fe.extract_features(self.feature_list, feats, array=True, feature_dic=self.feature_dic)

                # Model expects a leading batch dimension of 1
                probas = self.model.predict_proba(np.array([feats]))[0]

                # Get the class with the highest probability
                pred = np.argmax(probas)
                confidence = probas[pred]

                # Reject low-confidence predictions as the null class
                if confidence < self.rejection_threshold:
                    pred = self.null_label

                self.prediction_buffer.append(pred)

                # Once the buffer is full, the most frequent prediction wins
                if len(self.prediction_buffer) >= self.buffer_size:
                    mode_result = stats.mode(list(self.prediction_buffer), keepdims=False)
                    buffered_pred = mode_result.mode

                    if buffered_pred != self.null_label:
                        if self.debug:
                            # NOTE: the printed confidence comes from the most
                            # recent frame, not necessarily from buffered_pred.
                            label = self.gesture_mapping[buffered_pred] if self.gesture_mapping else buffered_pred
                            print(f"{time.time()} ACCEPTED: {label} (Conf: {confidence:.2f})")

                        if self.key_mapping is not None:
                            self._key_press(buffered_pred)

                        # Gesture accepted: start fresh for the next one
                        self.odh.reset()
                        self.prediction_buffer.clear()
                        expected_count = self.min_template_size
                    else:
                        expected_count += self.window_increment
                else:
                    expected_count += self.window_increment

    def _key_press(self, pred):
        """Trigger a keyboard press for the predicted gesture.

        Requires gesture_mapping to be set; only gestures present in
        key_mapping produce a key press.

        Parameters
        ----------
        pred: int
            The predicted class index to map to a key press.
        """
        # Imported lazily so pyautogui is only required when key presses are used
        import pyautogui
        gesture_name = self.gesture_mapping[pred]
        if gesture_name in self.key_mapping:
            pyautogui.press(self.key_mapping[gesture_name])
Do not normalize testing features without this as this could be considered information leakage. fix_feature_errors: bool (optional), default=False If true, fixes all feature errors (NaN=0, INF=0, -INF=0). + discrete: bool (optional), default=False + If True, windows is expected to be a list of templates (from parse_windows with discrete=True). + Features will be extracted for each template separately and returned as a list. + Note: Normalization is not currently supported in discrete mode. + Returns ---------- - dictionary or list - A dictionary where each key is a specific feature and its value is a list of the computed - features for each window. - StandardScaler - If normalize is true it will return the normalizer object. This should be passed into the feature extractor for test data. + dictionary or np.ndarray + When discrete=False: A dictionary where each key is a specific feature and its value is a list + of the computed features for each window. If array=True, returns a np.ndarray instead. + When discrete=True: A list of dictionaries/arrays (one per template). If array=True, each + element is a np.ndarray. + tuple (np.ndarray, StandardScaler) + If normalize=True (only supported when discrete=False), returns a tuple of (features array, scaler). + The scaler should be passed into the feature extractor for test data. 
+ """ + if discrete: + if normalize: + raise ValueError("Normalization is not currently supported in discrete mode.") + # Handle discrete mode: windows is a list of templates + all_features = [] + for template in windows: + template_features = self._extract_features_single(feature_list, template, feature_dic, array, fix_feature_errors) + all_features.append(template_features) + return all_features + + return self._extract_features_single(feature_list, windows, feature_dic, array, fix_feature_errors, normalize, normalizer) + + def _extract_features_single(self, feature_list, windows, feature_dic={}, array=False, fix_feature_errors=False, normalize=False, normalizer=None): + """Internal method to extract features from a single set of windows. + + Parameters + ---------- + feature_list: list + The group of features to extract. + windows: np.ndarray + A 3D array of windows with shape (num_windows, num_channels, window_size). + feature_dic: dict + A dictionary containing the parameters you'd like passed to each feature. + array: bool (optional), default=False + If True, the dictionary will get converted to a list. + fix_feature_errors: bool (optional), default=False + If true, fixes all feature errors (NaN=0, INF=0, -INF=0). + normalize: bool (optional), default=False + If True, the features will be normalized. + normalizer: StandardScaler, default=None + The normalizer to use for normalization. + + Returns + ---------- + dictionary or np.ndarray + The extracted features. 
""" features = {} scaler = None @@ -184,7 +229,7 @@ def extract_features(self, feature_list, windows, feature_dic={}, array=False, n features = scaler.fit_transform(features) else: features = normalizer.transform(features) - return features, scaler + return features, scaler return features def check_features(self, features, silent=False): diff --git a/libemg/gui.py b/libemg/gui.py index 45b4d020..4cae2558 100644 --- a/libemg/gui.py +++ b/libemg/gui.py @@ -20,12 +20,14 @@ class GUI: ---------- online_data_handler: OnlineDataHandler Online data handler used for acquiring raw EMG data. - args: dic, default={'media_folder': 'images/', 'data_folder':'data/', 'num_reps': 3, 'rep_time': 5, 'rest_time': 3, 'auto_advance': True} - The dictionary that defines the SGT window. Keys are: 'media_folder', - 'data_folder', 'num_reps', 'rep_time', 'rest_time', and 'auto_advance'. All media (i.e., images and videos) in 'media_folder' will be played in alphabetical order. + args: dic, default={'media_folder': 'images/', 'data_folder':'data/', 'num_reps': 3, 'rep_time': 5, 'rest_time': 3, 'auto_advance': True, 'discrete': False} + The dictionary that defines the SGT window. Keys are: 'media_folder', + 'data_folder', 'num_reps', 'rep_time', 'rest_time', 'auto_advance', and 'discrete'. All media (i.e., images and videos) in 'media_folder' will be played in alphabetical order. For video files, a matching labels file of the same name will be searched for and added to the 'data_folder' if found. 'rep_time' is only used for images since the duration of videos is automatically calculated based on - the number of frames (assumed to be 24 FPS). + the number of frames (assumed to be 24 FPS). When 'discrete' is True, recording is controlled by + holding the spacebar instead of using a timer - recording starts when spacebar is pressed and + stops when released. width: int, default=1920 The width of the SGT window. 
height: int, default=1080 @@ -39,7 +41,7 @@ class GUI: """ def __init__(self, online_data_handler, - args={'media_folder': 'images/', 'data_folder':'data/', 'num_reps': 3, 'rep_time': 5, 'rest_time': 3, 'auto_advance': True}, + args={'media_folder': 'images/', 'data_folder':'data/', 'num_reps': 3, 'rep_time': 5, 'rest_time': 3, 'auto_advance': True, 'discrete': False}, width=1920, height=1080, debug=False, @@ -73,8 +75,33 @@ def _window_init(self, width, height, debug=False): dpg.create_viewport(title="LibEMG", width=width, height=height) + + # Scale UI based on viewport height (base: 1080p -> 18pt font) + self.ui_scale = height / 1080.0 + font_size = int(18 * self.ui_scale) + + # Load a proper TTF font for crisp text at any size + font_paths = [ + "/System/Library/Fonts/SFNS.ttf", # macOS San Francisco + "/System/Library/Fonts/Helvetica.ttc", # macOS Helvetica + "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", # Linux + "C:/Windows/Fonts/segoeui.ttf", # Windows + ] + with dpg.font_registry(): + font_loaded = False + for font_path in font_paths: + if os.path.exists(font_path): + default_font = dpg.add_font(font_path, font_size) + font_loaded = True + break + if not font_loaded: + # Fallback: scale the default bitmap font (lower quality) + default_font = None + dpg.set_global_font_scale(self.ui_scale) + if font_loaded: + dpg.bind_font(default_font) + dpg.setup_dearpygui() - self._file_menu_init() diff --git a/requirements.txt b/requirements.txt index d799b59b..e8d572e1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -32,4 +32,6 @@ h5py onedrivedownloader sifi-bridge-py mindrove -crc \ No newline at end of file +crc +torch +tslearn \ No newline at end of file diff --git a/setup.py b/setup.py index 6177503c..ffc6592a 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ # python -m twine upload --repository testpypi dist/* --verbose <------ testpypi # -VERSION = "2.0.1" +VERSION = "3.0.0b1" DESCRIPTION = "LibEMG - Myoelectric Control Library" 
LONG_DESCRIPTION = "A library for designing and exploring real-time and offline myoelectric control systems." @@ -45,8 +45,12 @@ "sifi-bridge-py", "pygame", "mindrove", - "crc" + "crc", + "tslearn" ], + extras_require={ + "torch": ["torch"], + }, keywords=[ "emg", "myoelectric_control",