"""experiment_xml.py: run and analyse XML structured-output experiments with pydantic-xml models."""
import pickle
import xml.etree.ElementTree as ET
from pathlib import Path
from typing import Any, Optional, get_args, get_origin

from langchain_core.language_models import BaseChatModel
from pydantic_xml import BaseXmlModel, attr, element

from pydantic_structure_definitions import *
def load_experiment_summary(
suffix: str,
date: str,
namespace: dict,
dir: Optional[str] = None,
search_mode: Optional[str] = None,
):
    """Load a saved experiment-summary pickle and inject its models into `namespace`."""
    filename = f"exp{suffix}_all_models_{date}.pkl"
fileloc = Path(dir) / filename if dir else Path(filename)
with fileloc.open("rb") as f:
data = DynamicPXUnpickler(f, search_mode=search_mode).load()
# Load models into namespace
for key, value in data["models"].items():
if key not in namespace:
print(f"Loaded {key}")
namespace[key] = value
# Load other data
metadata_out = {k: v for k, v in data.items() if k != "models"}
return metadata_out
def load_single_experiment(
suffix: str,
date: str,
ident: str,
namespace: dict,
dir: Optional[str] = None,
search_mode: Optional[str] = None,
):
    """Load a single model's saved experiment results into `namespace`."""
filename = f"exp{suffix}_xml_output_{ident}_{date}.pkl"
fileloc = Path(dir) / filename if dir else Path(filename)
with fileloc.open("rb") as f:
data = DynamicPXUnpickler(f, search_mode=search_mode).load()
key = f"structure_support_by_model_{ident}"
if key not in namespace:
print(f"Loaded {key}")
namespace[key] = data["structure_support_by_model"]
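# A hedged usage sketch for the two loaders above. The suffix, date, ident, and
# directory values are hypothetical placeholders; DynamicPXUnpickler is assumed
# to come from the pydantic_structure_definitions star import.
#
#     metadata = load_experiment_summary(
#         suffix="1", date="20240101", namespace=globals(), dir="results"
#     )
#     load_single_experiment(
#         suffix="1", date="20240101", ident="gpt-4o-mini",
#         namespace=globals(), dir="results"
#     )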
def pydantic_to_xml_instructions(
model,
root_name=None,
root_description=None,
add_instructions=True,
n_list_examples: int = 2,
):
"""This function generates XML schema instructions based on a Pydantic XML model,
which can be used to guide Large Language Models in producing structured XML output.
Args:
model (BaseXmlModel): A Pydantic XML model class
root_name (str, optional): Custom name for the root XML element.
Defaults to model's xml_tag or title.
root_description (str, optional): Custom description for the root element.
Defaults to the docstring of the class.
add_instructions (bool, optional): Whether to include prefix instructions
for the LLM. Defaults to True.
        n_list_examples (int, optional): Number of example elements to render for a
            list field (excluding the trailing ellipsis). Defaults to 2.
Returns:
str: A string containing XML schema instructions, including:
- Optional LLM instruction prefix
- Root element with description
- Nested elements for each field
- Type hints and descriptions as XML comments
- Special handling for lists and nested models
Example:
```python
from pydantic_xml import BaseXmlModel, element
class Person(BaseXmlModel):
name: str = element(description="Person's full name")
age: int = element(description="Person's age in years")
format_instructions = pydantic_to_xml_instructions(Person)
```
"""
# Get the JSON schema representation of the model
schema_json = model.model_json_schema() or {}
xml = (
"You must respond only in XML using the following schema.\n"
"Do not provide any output outside the first and last XML tags.\n\n"
if add_instructions
else ""
)
# Start with root element named after the model
_root_name = root_name or model.__xml_tag__ or schema_json.get("title", "")
_root_desc = root_description or schema_json.get("description", "")
xml += f"<{_root_name}>\n <!--{_root_desc}-->\n"
# Process each property
for field_name, field in model.model_fields.items():
field_type = field.annotation
description = field.description
tag = field.path if field.path else field_name
# Handle nested classes
if isinstance(field_type, type) and issubclass(field_type, BaseXmlModel):
xml += pydantic_to_xml_instructions(
field_type, root_name=tag, add_instructions=False
)
xml += "\n"
# Handle lists
# TODO: lists of lists are not currently handled
elif get_origin(field_type) is list:
subtype = get_args(field_type)[0]
if isinstance(subtype, type) and issubclass(subtype, BaseXmlModel):
list_xml = pydantic_to_xml_instructions(
subtype,
root_name=tag,
root_description=description,
add_instructions=False,
)
else:
list_xml = f" <{tag}>\n"
list_xml += f" {{{description} - must be type {subtype.__name__}}}\n"
list_xml += f" </{tag}>"
# Insert list XML multiple times to prompt a list
for ii in range(n_list_examples):
                if ii == 0:
                    xml += "<!-- First list element -->\n"
                else:
                    xml += "<!-- Next list element -->\n"
xml += list_xml + "\n"
xml += "<!-- Etc -->\n"
xml += f" <{tag}>\n ...\n </{tag}>\n"
else:
# Add field as XML element with type comment and description
xml += f" <{tag}>\n"
xml += f" {{{description} - must be type {field_type.__name__}}}\n"
xml += f" </{tag}>\n"
xml += f"</{_root_name}>"
return xml
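# A hedged sketch of the instruction string produced above. The Person model is
# illustrative (borrowed from the docstring example), and the rendered output in
# the comment is approximate rather than byte-exact.
def _example_xml_instructions() -> str:
    class Person(BaseXmlModel, tag="person"):
        name: str = element(description="Person's full name")
        age: int = element(description="Person's age in years")

    # Produces roughly:
    #   You must respond only in XML using the following schema. ...
    #   <person>
    #     <!---->
    #     <name>
    #       {Person's full name - must be type str}
    #     </name>
    #     <age>
    #       {Person's age in years - must be type int}
    #     </age>
    #   </person>
    return pydantic_to_xml_instructions(Person)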
def extract_substring(input_string, start_str="{", end_str="}"):
    """Return the substring from the first start_str through the last end_str, inclusive."""
    start = input_string.find(start_str)
    end = input_string.rfind(end_str)
    if start != -1 and end != -1:
        return input_string[start : end + len(end_str)]
    else:
        raise RuntimeError("ExtractError: start or end string not found")
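# Illustrative behaviour (values here are hypothetical):
#     extract_substring("noise <a>hi</a> noise", "<a>", "</a>")  ->  "<a>hi</a>"
# find/rfind make this span from the first start string to the last end string,
# which strips any chatter the model emits around a single XML block.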
class EvalXmlOutput:
    """Evaluate an XML string against a reference schema dict, reporting validity
    and per-element content sizes."""

    def __init__(self, xml_schema: dict, start_tag="<article>", end_tag="</article>"):
        self._reference_schema = xml_schema
        self._start_tag = start_tag
        self._end_tag = end_tag
    def _calculate_length(self, input_, agg=None):
        """Return a size for a parsed value: the int itself, the length of a collection,
        or (with agg="sum") the summed sizes of its (key, value) pairs; None otherwise."""
        if isinstance(input_, int):
return input_
elif isinstance(input_, (list, tuple, set)):
if agg == "sum":
return sum(self._calculate_length(values) for key, values in input_)
else:
return len(input_)
else:
return None
    def _parse_xml(self, element, expected_keys):
        """Recursively check `element` against `expected_keys`, returning
        (structure_ok, schema_output_sizes, errors)."""
structure_ok = True
errors = []
schema_output_sizes = []
for key, value in expected_keys.items():
child_elements = element.findall(key)
c_sizes = None
if len(child_elements) == 0:
errors.append(f"Missing expected child element: {key}")
structure_ok = False
continue
for child_element in child_elements:
if child_element is None:
errors.append(f"Missing expected child element: {key}")
structure_ok = False
elif isinstance(value, dict):
# Nested dictionary, check child elements recursively
c_valid, c_sizes, c_errors = self._parse_xml(child_element, value)
if not c_valid:
errors.extend(c_errors)
structure_ok = False
elif value is None:
# Check there are no children of the node
children = list(child_element)
if len(children) > 0:
errors.append(f"Unexpected children for node {key}")
structure_ok = False
                else:
                    # Guard against empty elements, whose .text is None
                    c_sizes = len((child_element.text or "").split())
schema_output_sizes.append((key, c_sizes))
return structure_ok, schema_output_sizes, errors
def __call__(self, xml_string: str) -> dict:
"""Return the validity of the XML schema and sizes of elements"""
xml_string = extract_substring(
xml_string, start_str=self._start_tag, end_str=self._end_tag
)
try:
root = ET.fromstring(xml_string)
xml_valid = True
xml_schema_ok, output_sizes, errors = self._parse_xml(
root, self._reference_schema
)
xml_schema_reasoning = ". ".join(errors)
        except ET.ParseError:
            xml_valid = False
            xml_schema_ok = False
            output_sizes = []
            xml_schema_reasoning = "Error parsing XML"
        except Exception as e:
            xml_valid = False
            xml_schema_ok = False
            output_sizes = []
            xml_schema_reasoning = f"Error: {e.__class__.__name__}"
results = [dict(key="strict_valid", score=xml_valid)]
        # One length entry per schema element, keyed as "len_<element>"
        results.extend(
            dict(key=f"len_{name}", score=self._calculate_length(os, agg=None))
            for name, os in output_sizes
        )
        # Failures are reported under a separate "f_schema_valid" key, with reasoning
        if xml_schema_ok:
            results.append(dict(key="schema_valid", score=xml_schema_ok))
else:
results.append(
dict(
key="f_schema_valid",
score=xml_schema_ok,
reasoning=xml_schema_reasoning,
)
)
return dict(results=results)
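# A hedged usage sketch for EvalXmlOutput. The schema dict is hypothetical and
# mirrors the expected element tree: nested dicts for nested elements, None for
# leaf elements; the <article> tags match the evaluator's defaults.
def _example_eval_xml() -> dict:
    schema = {"title": None, "body": {"paragraph": None}}
    evaluator = EvalXmlOutput(schema)
    xml = (
        "<article><title>Hi</title>"
        "<body><paragraph>Some example text.</paragraph></body></article>"
    )
    # Returns dict(results=[...]) containing strict_valid, per-element len_*
    # scores, and a schema_valid (or f_schema_valid) entry.
    return evaluator(xml)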
def run_xml_experiment(
prompt_format,
questions: list[str],
llm_models: dict[str, BaseChatModel],
structured_formats: list[dict[str, Any]],
n_iter: int = 1,
resume: int = 0,
results_out: Optional[dict] = None,
save_file_name: Optional[str] = None,
):
"""Run XML generation experiments across different models and schema formats.
This function evaluates how well different language models can generate XML output
according to specified schemas. It runs multiple iterations across different models
and formats, tracking success rates and errors.
    Args:
        prompt_format (Template): A prompt template that can be formatted with instructions
        questions (list): List of questions to test XML generation against
        llm_models (dict): Dictionary mapping model names to LLM instances
        structured_formats (list): List of dicts containing 'pydantic' models and
            'format_instructions' for XML generation
        n_iter (int, optional): Number of iterations to run for each question. Defaults to 1
        resume (int, optional): Position to resume from in case of interrupted runs. Defaults to 0
        results_out (dict, optional): Existing results dictionary to append to. Defaults to None
        save_file_name (str, optional): Path to save experiment results. Defaults to None

    Returns:
        dict: Results organized by model and schema, where each entry contains:
            - valid: Fraction of generations that parsed successfully
            - outputs: Per-generation records holding the raw output, the parsed
              model, the error type and message, and the extraneous character count

    The function saves results to disk if save_file_name is provided, including the
    prompt, questions, and structure support data for each model.
    """
if results_out is None:
structure_support_by_model = {}
else:
structure_support_by_model = results_out
n_questions = len(questions)
position = 0
# Iterate over models
for model_name, llm_model in llm_models.items():
if model_name not in structure_support_by_model:
structure_support_by_model[model_name] = {}
# Iterate over schemas
for structure in structured_formats:
pydantic_obj = structure["pydantic"]
# Skip over existing experiments
if pydantic_obj.__name__ in structure_support_by_model[model_name]:
continue
# Another way to skip -- deprecate this?
position += 1
if position < resume:
continue
format_instructions = structure["format_instructions"]
print(
f"Model: {model_name} Output: {pydantic_obj.__name__} Pos: {position}"
)
# Format instructions, if required
prompt = prompt_format.partial(format_instructions=format_instructions)
# Iterate over questions
outputs = []
output_valid = 0
for _ in range(n_iter):
for ii in range(n_questions):
parsed = None
output = None
error_message = None
extra_output_chrs = None
error_type = "ok"
try:
test_chain = prompt | llm_model
output = test_chain.invoke(dict(question=questions[ii]))
# Trim to XML content only
start_tag = "<" + pydantic_obj.__xml_tag__ + ">"
end_tag = "</" + pydantic_obj.__xml_tag__ + ">"
output_xml = extract_substring(
output.content, start_tag, end_tag
)
# Extraneous content
extra_output_chrs = len(output.content) - len(output_xml)
# Parse the XML
parsed = pydantic_obj.from_xml(output_xml)
output_valid += 1
print(".", end="")
# Failures
                    except Exception as e:
                        error_type = "parse_error"
                        error_message = f"{type(e).__name__}, {e}"
                        print("e", end="")
finally:
outputs.append(
dict(
raw=output,
parsed=parsed,
error_type=error_type,
error_message=error_message,
extra_output_chrs=extra_output_chrs,
)
)
# Pause to avoid timeouts?
print()
structure_support_by_model[model_name][pydantic_obj.__name__] = dict(
valid=output_valid / (n_iter * n_questions),
outputs=outputs,
)
if save_file_name:
with open(file=save_file_name, mode="wb") as f:
pickle.dump(
dict(
prompt=prompt_format,
questions=questions,
structure_support_by_model=structure_support_by_model,
),
f,
)
return structure_support_by_model
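# A hedged wiring sketch for run_xml_experiment. ChatPromptTemplate and
# ChatOpenAI are standard LangChain classes, but the model handle, question,
# file name, and SomeXmlModel placeholder are assumptions, not part of this module.
#
#     from langchain_core.prompts import ChatPromptTemplate
#     from langchain_openai import ChatOpenAI
#
#     prompt = ChatPromptTemplate.from_template(
#         "{format_instructions}\n\nQuestion: {question}"
#     )
#     results = run_xml_experiment(
#         prompt_format=prompt,
#         questions=["What is XML?"],
#         llm_models={"gpt-4o-mini": ChatOpenAI(model="gpt-4o-mini")},
#         structured_formats=[
#             {
#                 "pydantic": SomeXmlModel,  # any BaseXmlModel subclass
#                 "format_instructions": pydantic_to_xml_instructions(SomeXmlModel),
#             }
#         ],
#         save_file_name="exp1_xml_output_gpt-4o-mini_20240101.pkl",
#     )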
def analyse_xml_experiment(
structure_support_by_model: dict,
structured_formats: list[dict[str, Any]],
resume: int = 0,
report_differences: bool = False,
verbose: bool = False,
):
"""Recalculate the results from a previous experiment
Args:
results_out (dict): Existing results dictionary to analyse. Contains outputs
with the results of a previous experiment run.
save_file_name (str, optional): Path to save experiment results. Defaults to None
report_differences (bool): If there are differences in current result (error or not)
report them in the console. Default False.
verbose (bool): Print success/failure for each model. Default False.
Returns:
dict: Results organized by model and schema, containing:
- valid: Success rate for XML generation
- error_types: List of error types encountered
- errors: Detailed error messages
- outputs: Raw and parsed outputs for successful generations
"""
position = 0
results_out = {}
# Iterate over models
for model_name, output_by_structure in structure_support_by_model.items():
results_out[model_name] = {}
# Iterate over schemas
for structure in structured_formats:
pydantic_obj = structure["pydantic"]
structure_name = pydantic_obj.__name__
# Skip over existing experiments
if structure_name not in output_by_structure:
verbose and print(
f"Structure {structure_name} not found for model {model_name}"
)
continue
results_out[model_name][structure_name] = {}
# Another way to skip -- deprecate this?
position += 1
if position < resume:
continue
verbose and print(
f"Model: {model_name} Structure: {structure_name} Pos: {position}"
)
# Iterate over outputs
current_outputs = output_by_structure[structure_name]["outputs"]
new_outputs = []
output_valid = 0
for output in current_outputs:
output_raw = output.get("raw", None)
parsed = None
error_message = None
extra_output_chrs = None
error_type = "ok"
try:
# Trim to XML content only
start_tag = "<" + pydantic_obj.__xml_tag__ + ">"
end_tag = "</" + pydantic_obj.__xml_tag__ + ">"
output_xml = extract_substring(
output_raw.content, start_tag, end_tag
)
# Extraneous content
extra_output_chrs = len(output_raw.content) - len(output_xml)
# Parse the XML
parsed = pydantic_obj.from_xml(output_xml)
output_valid += 1
verbose and print(".", end="")
# Failures
except Exception as e:
error_type = "parse_error"
error_message = f"{type(e).__name__}, {e}"
verbose and print("e", end="")
finally:
new_outputs.append(
dict(
raw=output_raw,
parsed=parsed,
error_type=error_type,
error_message=error_message,
extra_output_chrs=extra_output_chrs,
same_as_prior=(error_type == output["error_type"]),
)
)
# Compare with previous result
if report_differences and (error_type != output["error_type"]):
print(f"DIFF: {output['error_type']} -> {error_type}")
verbose and print()
results_out[model_name][structure_name] = dict(
num_correct=output_valid,
num_total=len(current_outputs),
valid=output_valid / len(current_outputs),
outputs=new_outputs,
)
return results_out
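# Re-scoring sketch: analyse_xml_experiment replays the saved raw outputs through
# the current parsers without calling any LLM, so schema or parser fixes can be
# evaluated retroactively. Variable names here are illustrative.
#
#     reanalysed = analyse_xml_experiment(
#         structure_support_by_model=results,
#         structured_formats=structured_formats,
#         report_differences=True,
#     )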