-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathrag_database_constructor.py
More file actions
executable file
·72 lines (64 loc) · 3.28 KB
/
rag_database_constructor.py
File metadata and controls
executable file
·72 lines (64 loc) · 3.28 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
import os
from langchain.document_loaders import DirectoryLoader, TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceEmbeddings
from tqdm import tqdm
def build_rag_database(transcripts_dir="data/transcripts", db_name="rag_db", db_path="data/databases",
                       huggingface_model="BAAI/bge-base-en-v1.5", chunk_size=1000, chunk_overlap=200, verbose=False):
    """
    Build a Chroma RAG database from transcript text files using Hugging Face embeddings.

    Args:
        transcripts_dir (str, optional): Directory containing transcript `.txt` files
            (searched recursively). Default is "data/transcripts".
        db_name (str, optional): Name of the database subdirectory to create. Default is "rag_db".
        db_path (str, optional): Parent directory in which the Chroma database is saved.
            Default is "data/databases".
        huggingface_model (str, optional): Model name for Hugging Face embeddings.
            Default is 'BAAI/bge-base-en-v1.5'.
        chunk_size (int, optional): Chunk size in characters for splitting documents. Default is 1000.
        chunk_overlap (int, optional): Overlap in characters between consecutive chunks. Default is 200.
        verbose (bool, optional): If True, print progress and debug messages. Default is False.

    Returns:
        Chroma | None: The newly built vector store, or None if a database already
        exists at the target path (in which case nothing is rebuilt).
    """
    # Ensure the database directory exists.
    db_full_path = os.path.join(db_path, db_name)
    os.makedirs(db_full_path, exist_ok=True)

    # Skip the (expensive) rebuild if an index is already present on disk.
    if os.path.exists(os.path.join(db_full_path, "index")):
        print(f"Chroma RAG database already exists at {db_full_path}")
        return None

    # Load transcript documents from `.txt` files, recursing into subdirectories.
    loader = DirectoryLoader(transcripts_dir, glob="**/*.txt", loader_cls=TextLoader, show_progress=True)
    docs_data = loader.load()
    if verbose:
        print(f"\nLoaded {len(docs_data)} documents from {transcripts_dir}")

    # Initialize Hugging Face embeddings.
    embeddings_model = HuggingFaceEmbeddings(model_name=huggingface_model)
    if verbose:
        print(f"Using Hugging Face embeddings model: {huggingface_model}")

    # Split documents into chunks. Separators are ordered coarse-to-fine so the
    # recursive splitter prefers paragraph and sentence boundaries and only
    # falls back to commas/spaces when a chunk is still too large (the original
    # order, single-space first, effectively disabled the recursion).
    if verbose:
        print(f"Splitting documents with chunk size {chunk_size} and overlap {chunk_overlap}")
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
        separators=["\n\n", "\n", ". ", ", ", " "]
    )
    data_splits = text_splitter.split_documents(docs_data)
    if verbose:
        print(f"Split {len(docs_data)} documents into {len(data_splits)} chunks")
        print(f"{data_splits[:3]=}")

    # Create and save the Chroma database.
    # BUG FIX: persist to the configured db_full_path instead of a hard-coded
    # os.getcwd()/data/RAG_Database/rag_db — previously the existence check
    # above and the actual save location disagreed, so the cache check never hit.
    vectordb = Chroma.from_documents(
        documents=data_splits,
        embedding=embeddings_model,
        persist_directory=db_full_path,
        collection_name="default"
    )
    vectordb.persist()
    if verbose:
        # Debug introspection was previously unguarded; keep it behind verbose.
        print(f"{vectordb._collection.count()=}")
        print(f"{len(vectordb.get()['ids'])=}")
        print(f"Chroma RAG database saved at {db_full_path}")
    return vectordb