Skip to content

Commit fb77f21

Browse files
Add docstrings for docstring coverage
1 parent d2bb7ad commit fb77f21

3 files changed

Lines changed: 4 additions & 0 deletions

File tree

examples/llm_ptq/example_utils.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -209,6 +209,7 @@ def build_quant_cfg(
209209
model_type,
210210
moe_calib_experts_ratio: float | None = None,
211211
) -> dict[str, Any]:
212+
"""Build quantization config with model-specific overrides for AWQ, SmoothQuant, and VLM."""
212213
quant_cfg = copy.deepcopy(quant_cfg)
213214
if "awq" in str(quant_cfg.get("algorithm")):
214215
from modelopt.torch.quantization.config import find_quant_cfg_entry_by_path

tests/_test_utils/torch/transformers_models.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -112,6 +112,7 @@ def get_tiny_qwen3_moe(**config_kwargs) -> PreTrainedModel:
112112
def create_tiny_qwen3_moe_dir(
113113
tmp_path: Path | str, with_tokenizer: bool = False, **config_kwargs
114114
) -> Path:
115+
"""Save a tiny Qwen3 MoE model (and optional tokenizer) to a temp directory."""
115116
qwen3_moe_dir = Path(tmp_path) / "tiny_qwen3_moe"
116117
if with_tokenizer:
117118
tokenizer = AutoTokenizer.from_pretrained(
@@ -157,6 +158,7 @@ def get_tiny_qwen3_5(**config_kwargs) -> PreTrainedModel:
157158

158159
##### GPT-OSS #####
159160
def get_tiny_gpt_oss(**config_kwargs) -> PreTrainedModel:
161+
"""Create a tiny GPT-OSS MoE model for testing."""
160162
set_seed(SEED)
161163

162164
kwargs = {

tests/unit/torch/quantization/plugins/test_huggingface.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -236,6 +236,7 @@ def test_is_homogeneous_hf_model_gpt_oss():
236236

237237

238238
def test_hf_decoder_discoverer_registration_path():
239+
"""Verify HF decoder layer discoverer is registered and returns correct layers."""
239240
model = get_tiny_llama()
240241
assert any(
241242
is_supported is is_homogeneous_hf_model and discoverer is get_homogeneous_hf_decoder_layers

0 commit comments

Comments (0)