generated from streamlit/streamlit-hello
-
Notifications
You must be signed in to change notification settings - Fork 0
/
indexing.py
52 lines (43 loc) · 1.68 KB
/
indexing.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
import os
from dotenv import load_dotenv
from pinecone import Pinecone as Pine
from langchain_community.vectorstores import Pinecone as Cone
from langchain_community.document_loaders import DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_openai import OpenAIEmbeddings
# Load environment variables from a local .env file (this script expects
# PINECONE_API_KEY; OpenAIEmbeddings presumably reads OPENAI_API_KEY from the
# environment as well — confirm).
load_dotenv()
# Initialize Pinecone client.
# NOTE(review): os.environ.get returns None when PINECONE_API_KEY is unset,
# so a missing key only fails later inside the client with a less clear error.
pc = Pine(api_key=os.environ.get("PINECONE_API_KEY"))
# Specify Pinecone index name (the index must already exist in the project;
# this script does not create it).
index_name = "physical-therapy"
index = pc.Index(index_name)
# Directory containing the source documents to be indexed.
directory = 'content/Surgery'
def load_docs(directory):
    """Load every document under *directory*, flattening newlines to spaces.

    Returns the list of loaded document objects with their ``page_content``
    rewritten in place (each ``'\\n'`` replaced by a single space).
    """
    docs = DirectoryLoader(directory).load()
    for document in docs:
        document.page_content = document.page_content.replace('\n', ' ')
    return docs
def split_docs(documents, chunk_size=500, chunk_overlap=20):
    """Split *documents* into overlapping chunks for embedding.

    Uses a recursive character splitter with the given ``chunk_size`` and
    ``chunk_overlap`` (both measured in characters).
    """
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
    )
    return splitter.split_documents(documents)
# Load and split documents into embeddable chunks.
documents = load_docs(directory)
docs = split_docs(documents)
text_documents = [doc.page_content for doc in docs]
# Initialize embeddings generator.
embeddings = OpenAIEmbeddings()
# Embed all chunks in one batched call instead of calling embed_query once
# per chunk — embed_documents batches texts per OpenAI request, cutting the
# number of round-trips dramatically for large document sets.
vectors = embeddings.embed_documents(text_documents)
# Upsert into Pinecone in batches: one request per BATCH_SIZE chunks rather
# than one request per chunk. The ids (f"doc_{i}") and per-chunk metadata
# ({"text": ...}) are identical to what the per-item loop produced.
BATCH_SIZE = 100
for start in range(0, len(vectors), BATCH_SIZE):
    end = min(start + BATCH_SIZE, len(vectors))
    batch = [
        {
            "id": f"doc_{i}",
            "values": vectors[i],
            "metadata": {"text": text_documents[i]},
        }
        for i in range(start, end)
    ]
    index.upsert(vectors=batch)