Skip to content

Commit 00a6fbc

Browse files
authored
Merge pull request #7 from pmclSF/chore/migrate-flake8-to-ruff
Migrate from flake8 to ruff
2 parents f68d4a8 + 2a840f6 commit 00a6fbc

55 files changed

Lines changed: 1043 additions & 582 deletions

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

.github/workflows/ci.yml

Lines changed: 12 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -7,8 +7,20 @@ on:
77
branches: [main]
88

99
jobs:
10+
lint:
11+
runs-on: ubuntu-latest
12+
steps:
13+
- uses: actions/checkout@v4
14+
- uses: actions/setup-python@v5
15+
with:
16+
python-version: '3.10'
17+
- run: pip install ruff
18+
- name: Ruff check
19+
run: ruff check src/ tests/
20+
1021
test:
1122
runs-on: ubuntu-latest
23+
needs: lint
1224
strategy:
1325
matrix:
1426
python-version: ['3.8', '3.9', '3.10']
@@ -29,7 +41,6 @@ jobs:
2941
3042
- name: Run tests
3143
run: |
32-
# Run entropy modeling tests and performance tests (legacy tests have pre-existing issues)
3344
pytest \
3445
tests/test_entropy_parameters.py \
3546
tests/test_context_model.py \
@@ -48,36 +59,3 @@ jobs:
4859
uses: codecov/codecov-action@v4
4960
with:
5061
file: ./coverage.xml
51-
52-
lint:
53-
runs-on: ubuntu-latest
54-
steps:
55-
- uses: actions/checkout@v4
56-
- uses: actions/setup-python@v5
57-
with:
58-
python-version: '3.10'
59-
- run: pip install flake8
60-
# Lint only the new entropy modeling and optimization files (legacy files have pre-existing issues)
61-
- name: Lint new source files
62-
run: |
63-
flake8 \
64-
src/entropy_parameters.py \
65-
src/entropy_model.py \
66-
src/context_model.py \
67-
src/channel_context.py \
68-
src/attention_context.py \
69-
src/model_transforms.py \
70-
src/constants.py \
71-
src/precision_config.py \
72-
src/benchmarks.py \
73-
--max-line-length=120
74-
- name: Lint new test files
75-
run: |
76-
flake8 \
77-
tests/test_entropy_parameters.py \
78-
tests/test_context_model.py \
79-
tests/test_channel_context.py \
80-
tests/test_attention_context.py \
81-
tests/test_performance.py \
82-
--max-line-length=120 \
83-
--ignore=E402,W503 # E402: imports after sys.path, W503: PEP8 updated to prefer breaks before operators

CLAUDE.md

Lines changed: 388 additions & 0 deletions
Large diffs are not rendered by default.

pyproject.toml

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
1+
[tool.ruff]
2+
line-length = 120
3+
target-version = "py38"
4+
5+
[tool.ruff.lint]
6+
select = ["F", "I", "E", "W"]
7+
ignore = [
8+
"E402", # module-level import not at top — tests use sys.path before imports
9+
"E741", # ambiguous variable name — common in math-heavy code
10+
]
11+
12+
[tool.ruff.lint.per-file-ignores]
13+
"tests/*.py" = ["E402"]
14+
15+
[tool.ruff.lint.isort]
16+
known-first-party = [
17+
"model_transforms", "entropy_model", "entropy_parameters",
18+
"context_model", "channel_context", "attention_context",
19+
"constants", "precision_config", "data_loader",
20+
"training_pipeline", "evaluation_pipeline", "experiment",
21+
"compress_octree", "decompress_octree", "octree_coding",
22+
"ds_mesh_to_pc", "ds_pc_octree_blocks", "ds_select_largest",
23+
"ev_compare", "ev_run_render", "mp_report", "mp_run",
24+
"quick_benchmark", "benchmarks", "parallel_process",
25+
"point_cloud_metrics", "map_color", "colorbar",
26+
"cli_train", "test_utils",
27+
]

requirements.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,3 +9,4 @@ pytest~=7.1.0
99
scipy~=1.8.1
1010
numba~=0.56.0
1111
tensorflow-probability~=0.19.0
12+
ruff>=0.4.0

src/attention_context.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -11,8 +11,9 @@
1111
- Global tokens provide long-range context without full attention
1212
"""
1313

14+
from typing import Any, Dict, Optional, Tuple
15+
1416
import tensorflow as tf
15-
from typing import Tuple, Optional, Dict, Any
1617

1718
from constants import LOG_2_RECIPROCAL
1819

@@ -668,8 +669,8 @@ def __init__(self,
668669
self.num_attention_layers = num_attention_layers
669670

670671
# Import here to avoid circular dependency
671-
from entropy_parameters import EntropyParameters
672672
from entropy_model import ConditionalGaussian
673+
from entropy_parameters import EntropyParameters
673674

674675
# Hyperprior-based parameter prediction
675676
self.entropy_parameters = EntropyParameters(
@@ -803,9 +804,9 @@ def __init__(self,
803804
self.num_channel_groups = num_channel_groups
804805
self.num_attention_layers = num_attention_layers
805806

806-
from entropy_parameters import EntropyParameters
807807
from channel_context import ChannelContext
808808
from entropy_model import ConditionalGaussian
809+
from entropy_parameters import EntropyParameters
809810

810811
# Hyperprior parameters
811812
self.entropy_parameters = EntropyParameters(

src/benchmarks.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -20,11 +20,12 @@
2020
print(f"Peak memory: {mem.peak_mb:.1f} MB")
2121
"""
2222

23-
import tensorflow as tf
2423
import time
25-
from typing import Callable, Dict, Any, Optional
26-
from dataclasses import dataclass, field
2724
from contextlib import contextmanager
25+
from dataclasses import dataclass, field
26+
from typing import Any, Callable, Dict, Optional
27+
28+
import tensorflow as tf
2829

2930

3031
@dataclass

src/channel_context.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,8 +7,9 @@
77
maintaining autoregressive structure across groups.
88
"""
99

10+
from typing import Any, Dict, List, Optional, Tuple
11+
1012
import tensorflow as tf
11-
from typing import Tuple, Optional, Dict, Any, List
1213

1314
from constants import LOG_2_RECIPROCAL
1415

@@ -230,8 +231,8 @@ def __init__(self,
230231
self.channels_per_group = latent_channels // num_groups
231232

232233
# Import here to avoid circular dependency
233-
from entropy_parameters import EntropyParameters
234234
from entropy_model import ConditionalGaussian
235+
from entropy_parameters import EntropyParameters
235236

236237
# Hyperprior-based parameter prediction
237238
self.entropy_parameters = EntropyParameters(

src/cli_train.py

Lines changed: 13 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1,23 +1,25 @@
1-
import tensorflow as tf
2-
import os
31
import argparse
42
import glob
5-
import numpy as np
3+
import os
4+
65
import keras_tuner as kt
6+
import tensorflow as tf
7+
78
from ds_mesh_to_pc import read_off
89

10+
911
def create_model(hp):
1012
model = tf.keras.Sequential()
1113
model.add(tf.keras.layers.InputLayer(input_shape=(2048, 3)))
12-
14+
1315
for i in range(hp.Int('num_layers', 1, 5)):
1416
model.add(tf.keras.layers.Dense(
1517
hp.Int(f'layer_{i}_units', min_value=64, max_value=1024, step=64),
1618
activation='relu'
1719
))
18-
20+
1921
model.add(tf.keras.layers.Dense(3, activation='sigmoid'))
20-
22+
2123
model.compile(
2224
optimizer=tf.keras.optimizers.Adam(
2325
learning_rate=hp.Float('learning_rate', 1e-5, 1e-3, sampling='log')
@@ -28,7 +30,7 @@ def create_model(hp):
2830

2931
def load_and_preprocess_data(input_dir, batch_size):
3032
file_paths = glob.glob(os.path.join(input_dir, "*.ply"))
31-
33+
3234
def parse_ply_file(file_path):
3335
mesh_data = read_off(file_path)
3436
return mesh_data.vertices
@@ -47,7 +49,7 @@ def data_generator():
4749
dataset = dataset.shuffle(buffer_size=len(file_paths))
4850
dataset = dataset.batch(batch_size)
4951
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
50-
52+
5153
return dataset
5254

5355
def tune_hyperparameters(input_dir, output_dir, num_epochs=10):
@@ -63,10 +65,10 @@ def tune_hyperparameters(input_dir, output_dir, num_epochs=10):
6365

6466
dataset = load_and_preprocess_data(input_dir, batch_size=32)
6567
tuner.search(dataset, epochs=num_epochs, validation_data=dataset)
66-
68+
6769
best_model = tuner.get_best_models(num_models=1)[0]
6870
best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]
69-
71+
7072
print("Best Hyperparameters:", best_hps.values)
7173
best_model.save(os.path.join(output_dir, 'best_model'))
7274

@@ -95,4 +97,4 @@ def main():
9597
model.save(os.path.join(args.output_dir, 'trained_model'))
9698

9799
if __name__ == "__main__":
98-
main()
100+
main()

src/colorbar.py

Lines changed: 14 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,12 @@
1-
import numpy as np
2-
import matplotlib.pyplot as plt
3-
import matplotlib as mpl
4-
from typing import Tuple, Callable, Optional, List
5-
from dataclasses import dataclass
61
import json
2+
from dataclasses import dataclass
73
from pathlib import Path
4+
from typing import Callable, List, Optional, Tuple
5+
6+
import matplotlib as mpl
7+
import matplotlib.pyplot as plt
8+
import numpy as np
9+
810

911
@dataclass
1012
class ColorbarConfig:
@@ -67,7 +69,10 @@ def get_colorbar(
6769
else:
6870
# Format numeric labels
6971
formatter = mpl.ticker.FormatStrFormatter(label_format)
70-
cbar.ax.xaxis.set_major_formatter(formatter) if orientation == 'horizontal' else cbar.ax.yaxis.set_major_formatter(formatter)
72+
if orientation == 'horizontal':
73+
cbar.ax.xaxis.set_major_formatter(formatter)
74+
else:
75+
cbar.ax.yaxis.set_major_formatter(formatter)
7176

7277
# Set font sizes
7378
cbar.ax.tick_params(labelsize=font_size)
@@ -96,10 +101,10 @@ def save_color_mapping(filename: str,
96101
# Create mapping
97102
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
98103
cmap = plt.get_cmap(cmap)
99-
104+
100105
values = np.linspace(vmin, vmax, num_samples)
101106
colors = [cmap(norm(v)) for v in values]
102-
107+
103108
# Save to file
104109
Path(filename).parent.mkdir(parents=True, exist_ok=True)
105110
with open(filename, 'w') as f:
@@ -122,4 +127,4 @@ def save_color_mapping(filename: str,
122127
tick_rotation=45,
123128
extend='both'
124129
)
125-
plt.show()
130+
plt.show()

0 commit comments

Comments
 (0)