# dump.py
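"""Flask app that summarizes the files of a GitHub repository with GPT-3.5 and
answers questions about the repository via a LangChain retrieval-QA chain
built over those summaries."""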

from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
from langchain.document_loaders import TextLoader
from langchain.document_loaders import DirectoryLoader
from flask import Flask, render_template, request, redirect, url_for
from github import Github
from ignore import should_ignore
import git
import openai
import os
import shutil

app = Flask(__name__)

model_id = "gpt-3.5-turbo"
# Replace <Key> with a real OpenAI API key before running.
openai.api_key = "<Key>"
os.environ["OPENAI_API_KEY"] = "<Key>"

def clone_repo(repo_url):
    """Clone a Git repository into /tmp/cloned_repo, replacing any previous clone."""
    temp_dir = "/tmp/cloned_repo"
    if os.path.exists(temp_dir):
        shutil.rmtree(temp_dir)
    # clone_from creates the target directory itself, so no os.mkdir is needed first.
    repo = git.Repo.clone_from(repo_url, temp_dir)
    return repo
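
# Note: clone_repo is defined but never called below; the index route reads file
# contents through the GitHub API instead of cloning.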

def process_llm_response(llm_response):
    """Print the chain's answer followed by the source files it drew on."""
    print(llm_response['result'])
    print('\n\nSources:')
    for source in llm_response["source_documents"]:
        print(source.metadata['source'])

def langchain_response(query):
    """Build a retrieval-QA chain over the generated summaries and answer the query."""
    loader = DirectoryLoader(path="./summaries", glob="*.txt", loader_cls=TextLoader)
    documents = loader.load()
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    texts = text_splitter.split_documents(documents)
    print("Texts loaded:", texts)

    # Embed the split summaries and persist them so the retriever has content
    # to search; the original constructed an empty Chroma store and never
    # ingested the texts.
    persist_directory = 'db'
    embedding = OpenAIEmbeddings()
    vectordb = Chroma.from_documents(
        documents=texts,
        embedding=embedding,
        persist_directory=persist_directory
    )
    print("Chroma DB created")

    retriever = vectordb.as_retriever()
    turbo_llm = ChatOpenAI(
        temperature=0.5,
        model_name='gpt-3.5-turbo'
    )
    qa_chain = RetrievalQA.from_chain_type(
        llm=turbo_llm,
        chain_type="stuff",
        retriever=retriever,
        return_source_documents=True
    )
    llm_response = qa_chain(query)
    # process_llm_response(llm_response)
    print(llm_response)
    return llm_response
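
# With return_source_documents=True, the chain returns a dict of the form
# {'query': ..., 'result': <answer>, 'source_documents': [Document, ...]},
# which is exactly what process_llm_response above expects.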

def ChatGPT_conversation(conversation):
    """Send the conversation to the Chat Completions API and append the model's reply."""
    response = openai.ChatCompletion.create(
        model=model_id,
        messages=conversation
    )
    # The chat API uses the 'assistant' role for model messages, not 'AI'.
    conversation.append({'role': 'assistant', 'content': response.choices[0].message.content})
    return conversation
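
# Example: ChatGPT_conversation([{'role': 'user', 'content': 'Hello'}]) returns
# the same list with the assistant's reply appended as the last element.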

def get_repo_files(repo):
    """Return the files in the repository's root directory (subdirectories are not traversed)."""
    files = []
    for file in repo.get_contents(""):
        if file.type == "file":
            files.append(file)
    return files

@app.route('/', methods=['GET', 'POST'])
def index():
    if request.method == 'POST':
        github_url = request.form.get('github_url')

        # Fetch repository data using PyGithub; get_repo expects an "owner/repo" slug.
        g = Github()
        repo = g.get_repo(github_url.replace("https://github.com/", ""))
        files = get_repo_files(repo)

        # Create the summaries directory if it doesn't exist.
        if not os.path.exists('summaries'):
            os.mkdir('summaries')

        # Ask the chat model to summarize each file and save the summary to a text file.
        for file in files:
            if not should_ignore(file.name):
                content = file.decoded_content.decode("utf-8")
                conversation = [
                    {"role": "user", "content": f"Explain the contents of the {file.name} file in 500 words: {content}"}
                ]
                response = ChatGPT_conversation(conversation)
                with open(f"summaries/{file.name}.txt", "w") as f:
                    f.write(response[-1]["content"])

        # Answer a sample question over the summaries, then clean up and redirect home.
        langchain_response("What is the code about?")
        shutil.rmtree('summaries')
        return redirect(url_for('index'))
    return render_template('index.html')

if __name__ == '__main__':
    app.run(debug=True)
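
# Usage sketch (assumptions: a local ignore.py providing should_ignore, and a
# templates/index.html with a form field named "github_url", as the code implies):
#   $ python dump.py
#   Then submit a repository URL such as https://github.com/<owner>/<repo> via
#   the form at http://127.0.0.1:5000/.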