# base_model.py
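"""Sanity-check the raw Qwen2.5-7B base model (4-bit quantized, no fine-tuning)
on Sherlock Holmes-style completion prompts, printing the outputs and saving
them to base_model_results.json as a before-fine-tuning baseline."""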
import json

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

BASE_MODEL_PATH = "./Qwen2.5-7B"


class BaseModelTester:
    def __init__(self, model_name=BASE_MODEL_PATH):
        print(f"Loading base model: {model_name}")
        bnb_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_compute_dtype=torch.bfloat16,
            bnb_4bit_use_double_quant=True,
        )
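        # device_map="auto" lets accelerate place the quantized layers on the
        # available GPU(s), spilling to CPU if necessary.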
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForCausalLM.from_pretrained(
            model_name,
            quantization_config=bnb_config,
            device_map="auto",
            torch_dtype=torch.bfloat16,
        )
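        # Base-model tokenizers often ship without a pad token; reuse EOS so
        # generate() can pad without warnings.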
        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def generate_response(self, prompt, max_new_tokens=150, temperature=0.7):
        """Generate a plain-text completion from the base model."""
        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.model.device)
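        # Nucleus sampling (top_p=0.9) at moderate temperature; sampling avoids
        # the repetition loops greedy decoding often produces on base models.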
        with torch.no_grad():
            outputs = self.model.generate(
                **inputs,
                max_new_tokens=max_new_tokens,
                num_return_sequences=1,
                temperature=temperature,
                top_p=0.9,
                do_sample=True,
                pad_token_id=self.tokenizer.eos_token_id,
            )
        # Slice off the prompt so only the newly generated tokens are returned.
        return self.tokenizer.decode(
            outputs[0][inputs.input_ids.shape[1]:], skip_special_tokens=True
        )

    def test_prompts(self):
        """Test the base model with Sherlock-style prompts (pure completion, no system prompt)."""
        test_prompts = [
            "Human: Who are you?\nSherlock Holmes:",
            "Human: How do you solve mysteries?\nSherlock Holmes:",
            "Human: A body was found in a locked room. What do you deduce?\nSherlock Holmes:",
            "Human: I found a muddy footprint in the hallway.\nSherlock Holmes:",
        ]
print("\n" + "="*60)
print("BASE MODEL RESPONSES (no fine-tuning)")
print("="*60)
results = []
for prompt in test_prompts:
response = self.generate_response(prompt)
print(f"\nPrompt: {prompt}")
print(f"Response: {response}")
print("-" * 60)
results.append({"prompt": prompt, "response": response})
with open("base_model_results.json", "w") as f:
json.dump(results, f, indent=2)
print("\nResults saved to base_model_results.json")
return results


if __name__ == "__main__":
    tester = BaseModelTester(BASE_MODEL_PATH)
    tester.test_prompts()
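
# Usage sketch (assumes the Qwen2.5-7B weights are already available locally,
# e.g. via: huggingface-cli download Qwen/Qwen2.5-7B --local-dir ./Qwen2.5-7B):
#
#   python base_model.py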