-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathconfig.toml
More file actions
31 lines (27 loc) · 1.13 KB
/
config.toml
File metadata and controls
31 lines (27 loc) · 1.13 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
[llm]
# Model selection — keep exactly one `model` line uncommented.
#model = "gemma3"
#model = "claude-3.5-sonnet-v2"
#model = "gpt4o"
#model = "llama3-2-90b-instruct-v1:0"
model = "gpt-3.5-turbo"
#api_key = "sk-1234"
api_key = "*"  # placeholder — replace with a real API key before use
base_url = "https://api.openai.com/v1/chat/completions"
#base_url = "http://localhost:11434/v1/chat/completions"  # local server (11434 is the Ollama default port)
#base_url = "http://localhost:4000/v1/chat/completions"   # local proxy (4000 is the LiteLLM default port)
#max_tokens = 8192
max_tokens = 4096
temperature = 0.8

[chunking]
chunk_size = 100  # number of words per chunk
overlap = 20      # number of words overlapped between consecutive chunks

[standardization]
enabled = true               # enable entity standardization
use_llm_for_entities = true  # use the LLM for additional entity resolution

[inference]
enabled = true                # enable relationship inference
use_llm_for_inference = true  # use the LLM for relationship inference
apply_transitive = true       # apply transitive inference rules

[visualization]
# Edge smoothing for the rendered graph. Valid values:
#   false  — straight edges (no smoothing)
#   true   — same as "continuous"
#   one of: "dynamic", "continuous", "discrete", "diagonalCross",
#           "straightCross", "horizontal", "vertical", "curvedCW",
#           "curvedCCW", "cubicBezier"
edge_smooth = false