2 changes: 1 addition & 1 deletion pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

[project]
name = "study-lyte"
version = "0.10.2"
version = "0.10.3"
description = "Analysis software for the Lyte probe, a digital penetrometer for studying snow"
keywords = ["snow penetrometer", "smart probe", "digital penetrometer", 'lyte probe', "avalanches", "snow density"]
readme = "README.rst"
2 changes: 1 addition & 1 deletion setup.cfg
@@ -1,5 +1,5 @@
[bumpversion]
current_version = 0.10.2
current_version = 0.10.3
commit = True
tag = True

2 changes: 1 addition & 1 deletion study_lyte/__init__.py
@@ -2,4 +2,4 @@

__author__ = """Micah Johnson """
__email__ = 'info@adventuredata.com'
__version__ = '0.10.2'
__version__ = '0.10.3'
22 changes: 8 additions & 14 deletions study_lyte/detect.py
@@ -115,20 +115,14 @@ def get_signal_event(signal_series, threshold=0.001, search_direction='forward',

    # if we have results, find the first match with n points that meet the criteria
    if n_points > 1 and len(ind) > 0:
        npnts = n_points - 1
        id_diff = np.ones_like(ind) * 0
        id_diff[1:] = (ind[1:] - ind[0:-1])
        id_diff[0] = 1
        id_diff = np.abs(id_diff)
        spacing_ind = []

        # Determine if the last n points are all 1 idx apart
        for i, ix in enumerate(ind):
            if i >= npnts:
                test_arr = id_diff[i - npnts:i + 1]
                if all(test_arr == 1):
                    spacing_ind.append(ix)
        ind = spacing_ind
        # Vectorized consecutive point detection
        diffs = np.diff(ind)
        # Find runs of consecutive indices (diff == 1)
        consecutive = np.concatenate([[False], diffs == 1])
        # AND the mask with shifted copies of itself so only entries that
        # end a run of n_points consecutive indices remain True
        for _ in range(n_points - 2):
            consecutive[1:] = consecutive[1:] & consecutive[:-1]
        ind = ind[consecutive]

    # If no results are found, return the first index of the series
    if len(ind) == 0:
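For reference, a minimal standalone sketch of the mask logic added above, run on an invented index array (not data from this repository):

import numpy as np

# Invented example: indices where a threshold was exceeded
ind = np.array([2, 5, 6, 7, 8, 20, 21])
n_points = 3

diffs = np.diff(ind)
consecutive = np.concatenate([[False], diffs == 1])
# Each pass ANDs the mask with a shifted copy of itself, so surviving
# entries end progressively longer runs of consecutive indices
for _ in range(n_points - 2):
    consecutive[1:] = consecutive[1:] & consecutive[:-1]

print(ind[consecutive])  # [7 8] -- each ends a run of >= 3 consecutive indices

Unlike the removed loop, which rebuilt the spacing test for every candidate index, the mask is computed once for the whole array.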
35 changes: 26 additions & 9 deletions study_lyte/io.py
@@ -1,4 +1,5 @@
from typing import Tuple
from pathlib import Path
from typing import Tuple, Union
import pandas as pd
import numpy as np

@@ -24,19 +25,35 @@ def find_metadata(f:str) -> [int, dict]:
            break
    return header_position, metadata

def read_data(f:str, metadata:dict, header_position:int) -> Tuple[pd.DataFrame, dict]:
    """Read just the csv to enable parsing metadata and header position separately"""
    df = pd.read_csv(f, header=header_position)
    # Drop any columns written with the plain index
    df.drop(df.filter(regex="Unname"), axis=1, inplace=True)

    if 'time' not in df and 'SAMPLE RATE' in metadata:
def read_data(f: Union[str, Path], metadata: dict, header_position: int) -> Tuple[pd.DataFrame, dict]:
    """
    Reads just the data from the Lyte probe CSV file
    Args:
        f: Path to csv, or file buffer
        metadata: Dictionary of metadata from the header
        header_position: Line number where the header ends
    Returns:
        tuple:
            **df**: pandas Dataframe
            **metadata**: dictionary containing header info
    """
    # Use the faster C parser engine explicitly
    df = pd.read_csv(f, header=header_position, engine='c')

    # Faster column dropping - avoid regex
    unnamed_cols = [c for c in df.columns if c.startswith('Unnamed')]
    if unnamed_cols:
        df.drop(columns=unnamed_cols, inplace=True)

    if 'time' not in df.columns and 'SAMPLE RATE' in metadata:
        sr = int(metadata['SAMPLE RATE'])
        n = len(df)
        df['time'] = np.linspace(0, n/sr, n)
        df['time'] = np.linspace(0, n / sr, n)
    return df, metadata

def read_csv(f: str) -> Tuple[pd.DataFrame, dict]:

def read_csv(f: Union[str, Path]) -> Tuple[pd.DataFrame, dict]:
    """
    Reads any Lyte probe CSV and returns a dataframe
    and metadata dictionary from the header
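A quick illustration of the two behaviours this hunk changes; the CSV content and sample rate below are fabricated, not real Lyte probe output:

import io
import numpy as np
import pandas as pd

raw = "Unnamed: 0,Sensor\n0,0.10\n1,0.12\n2,0.15\n"
df = pd.read_csv(io.StringIO(raw), header=0, engine='c')

# Drop index-artifact columns with a list comprehension instead of a regex filter
unnamed_cols = [c for c in df.columns if c.startswith('Unnamed')]
if unnamed_cols:
    df.drop(columns=unnamed_cols, inplace=True)

# Synthesize a time axis from the metadata sample rate, as read_data does
metadata = {'SAMPLE RATE': '16000'}
if 'time' not in df.columns and 'SAMPLE RATE' in metadata:
    sr = int(metadata['SAMPLE RATE'])
    df['time'] = np.linspace(0, len(df) / sr, len(df))

print(df.columns.tolist())  # ['Sensor', 'time']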
15 changes: 6 additions & 9 deletions study_lyte/profile.py
@@ -300,16 +300,13 @@ def has_upward_motion(self):
            Bool indicating if upward motion was detected
        """
        if self._has_upward_motion is None:
            self._has_upward_motion = False
            # crop the depth data and down sample for speedy check
            # crop the depth data and downsample for speedy check
            n = get_points_from_fraction(len(self.depth), 0.005)
            coarse = self.depth.iloc[self.start.index:self.stop.index:n]
            # loop and find any values greater than the current value
            for i, v in coarse.items():
                upward = np.any(coarse.loc[i:] > v + 5)
                if upward:
                    self._has_upward_motion = True
                    break
            data = self.depth.iloc[self.start.index:self.stop.index:n].values

            # Vectorized: check if any point rises > 5 above the running minimum
            cummin = np.minimum.accumulate(data)
            self._has_upward_motion = bool(np.any(data - cummin > 5))

        return self._has_upward_motion

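The running-minimum trick replaces the O(n^2) pairwise scan with a single O(n) pass: a point more than 5 units above the minimum of everything before it implies the probe moved back up. A sketch with invented depth values:

import numpy as np

# Invented downsampled depth trace (same units as self.depth, assumed cm)
data = np.array([100.0, 80.0, 60.0, 68.0, 40.0])

cummin = np.minimum.accumulate(data)    # running minimum: [100, 80, 60, 60, 40]
print(bool(np.any(data - cummin > 5)))  # True: 68 sits 8 above the prior minimum 60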
6 changes: 3 additions & 3 deletions tests/conftest.py
@@ -2,16 +2,16 @@
from os.path import dirname, join
from study_lyte.io import read_csv
from study_lyte.profile import LyteProfileV6

from pathlib import Path

@pytest.fixture(scope='session')
def data_dir():
    return join(dirname(__file__), 'data')
    return Path(__file__).parent.joinpath('data')


@pytest.fixture(scope='function')
def raw_df(data_dir, fname):
    df, meta = read_csv(join(data_dir, fname))
    df, meta = read_csv(data_dir.joinpath(fname))
    return df

@pytest.fixture(scope='function')
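Since raw_df requests a fname value that is not defined in this hunk, a test would typically supply it via parametrization; a hypothetical example (the filename is invented, not taken from this repository's test data):

import pytest

@pytest.mark.parametrize('fname', ['example_profile.csv'])
def test_raw_df_loads(raw_df):
    # raw_df resolves fname from the parametrize mark above
    assert len(raw_df) > 0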