forked from nala7/Type-Inference-Cool
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathMain.py
More file actions
91 lines (77 loc) · 2.98 KB
/
Main.py
File metadata and controls
91 lines (77 loc) · 2.98 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
import AST.AST_Print as AST_Print
import os
from cmp.evaluation import evaluate_reverse_parse
from Example_Programs import examples, inference
from Tokenizer import *
from SemanticChecker.Type_Builder import TypeBuilder
from SemanticChecker.Type_Checker import TypeChecker
from SemanticChecker.Type_Collector import TypeCollector
from SemanticChecker.ScopePrint import ScopePrint
from Serializer import Serializer
""" # Cool Interpreter """
def run_pipeline(text):
    """Run the full COOL front-end pipeline over *text*.

    Stages: tokenize -> parse (with a pre-compiled serialized parser) ->
    build AST -> collect types -> build types -> iteratively check/infer
    types until the set of AUTO_TYPE placeholders stops shrinking.

    Returns a ``(report, parse_error)`` tuple: *report* is a human-readable
    log of every stage, *parse_error* is ``None`` on success or a message
    string when the token stream could not be parsed (in which case the
    report is empty).
    """
    tokens = tokenize_text(text)
    # The LR parser is deserialized from a pre-built artifact next to the cwd.
    parser = Serializer.load(os.path.join(os.getcwd(), "compiled_parser"))
    parse_error = None
    ret_text = ""
    try:
        parse, operations = parser([t.token_type for t in tokens], get_shift_reduce=True)
    except Exception:  # was a bare except: never swallow SystemExit/KeyboardInterrupt
        parse_error = "Input could not be parsed. Please check your code"
        return ret_text, parse_error

    ret_text += "==================== AST ====================== \n"
    ast = evaluate_reverse_parse(parse, operations, tokens)
    formatter = AST_Print.FormatVisitor()
    tree = formatter.visit(ast)
    ret_text += str(tree) + "\n"

    errors = []
    collector = TypeCollector(errors)
    collector.visit(ast)
    context = collector.context

    ret_text += "=============== BUILDING TYPES ================" + "\n"
    builder = TypeBuilder(context, errors)
    builder.visit(ast)
    ret_text += "Context:" + "\n"
    ret_text += str(context) + "\n"

    ret_text += "=============== CHECKING TYPES ================" + "\n"
    # Seed run: each TypeChecker pass may infer concrete types for some
    # AUTO_TYPE placeholders, which can unlock further inferences.
    old_errors = errors.copy()
    inferred_types = {}
    checker = TypeChecker(context, old_errors, inferred_types)
    scope, inferred_types, auto_types = checker.visit(ast)
    # Fixed-point iteration: re-check until the number of unresolved
    # AUTO_TYPEs stops changing; only then are the errors final.
    while True:
        old_errors = errors.copy()
        old_len = len(auto_types)
        checker = TypeChecker(context, old_errors, inferred_types)
        scope, inferred_types, auto_types = checker.visit(ast)
        if len(auto_types) == old_len:
            errors = old_errors
            break

    ret_text += "Scope:" + "\n"
    scope_tree = ScopePrint().visit(scope)
    ret_text += str(scope_tree) + "\n"
    str_errors = "\n\t".join(errors)  # errors is already a list of strings
    ret_text += "Errors:\n\t" + str_errors + "\n"
    str_auto_types = "\n\t".join(str(at) for at in auto_types)
    ret_text += "Auto Types:\n\t" + str_auto_types + "\n"
    ret_text += "Inferred Types:" + "\n\t"
    ret_text += (
        "\n\t".join(f"{key}: {typ.name}" for key, typ in inferred_types.items())
        + "\n"
    )
    return ret_text, parse_error
def run_example_files():
    """Run every bundled example program through the pipeline and print a summary.

    An example counts as failing when parsing fails or the pipeline raises.
    Prints each example's report, then the failing names and a pass ratio.
    """
    fail = []
    for example_name, example_text in examples:
        print(example_name)
        try:
            text, parse_error = run_pipeline(example_text)
            if parse_error:
                fail.append(example_name)
            # Bug fix: the original called run_pipeline a SECOND time here
            # (doubling all work) and printed the raw (text, error) tuple.
            print(text)
        except Exception:  # was a bare except: keep best-effort, but narrower
            fail.append(example_name)
    print(f"FAILING EXAMPLES: {fail}")
    print(f"SUCCEEDED EXAMPLES: {len(examples) - len(fail)}/{len(examples)}")
# run_pipeline(inference)  # manual one-off: run only the inference demo program

# Script entry point: execute the whole example suite.
if __name__ == "__main__":
    run_example_files()