setup.py
import os
import setuptools
from typing import List
from setuptools import find_packages

for_pypi = os.getenv('PYPI') is not None


def parse_requirements(file_name: str) -> List[str]:
    with open(file_name) as f:
        lines = f.read().splitlines()
    # Filter out comments and empty lines
    lines = [line for line in lines if line.strip() and not line.strip().startswith("#")]
    requirements = []
    for line in lines:
        if 'chromamigdb' in line:
            # hnsw issue
            continue
        if for_pypi:
            if 'http://' in line or 'https://' in line:
                continue
            if 'llama-cpp-python' in line and ';' in line:
                line = line[:line.index(';')]
        # assume all requirements files are in PEP 508 format with name @ <url> or name @ git+http/git+https
        requirements.append(line)
    return requirements
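
# Illustrative sketch (added note, not in the original file): given hypothetical requirements lines such as
#   torch==2.1.2
#   llama-cpp-python==0.2.56; platform_machine == "x86_64"
#   peft @ git+https://github.com/huggingface/peft.git
# a regular build keeps all three as written, while a PyPI build (PYPI env var set) keeps 'torch==2.1.2',
# strips the environment marker from the llama-cpp-python line, and drops the direct-URL requirement,
# since PyPI does not accept packages that declare direct URL dependencies. Lines mentioning
# 'chromamigdb' are skipped in every build.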
install_requires = parse_requirements('requirements.txt')

req_files = [
    'reqs_optional/requirements_optional_langchain.txt',
    'reqs_optional/requirements_optional_llamacpp_gpt4all.txt',
    'reqs_optional/requirements_optional_langchain.gpllike.txt',
    'reqs_optional/requirements_optional_agents.txt',
    'reqs_optional/requirements_optional_langchain.urls.txt',
    'reqs_optional/requirements_optional_doctr.txt',
    'reqs_optional/requirements_optional_audio.txt',
    'reqs_optional/requirements_optional_image.txt',
]

for req_file in req_files:
    x = parse_requirements(req_file)
    install_requires.extend(x)

# faiss on cpu etc.
install_cpu = parse_requirements('reqs_optional/requirements_optional_cpu_only.txt')

# faiss on gpu etc.
install_cuda = parse_requirements('reqs_optional/requirements_optional_gpu_only.txt')

# TRAINING
install_extra_training = parse_requirements('reqs_optional/requirements_optional_training.txt')

# WIKI_EXTRA
install_wiki_extra = parse_requirements('reqs_optional/requirements_optional_wikiprocessing.txt')

# User-friendly description from README.md
current_directory = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(current_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

with open(os.path.join(current_directory, 'version.txt'), encoding='utf-8') as f:
    version = f.read().strip()

# Data to include
packages = find_packages(include=['h2ogpt', 'h2ogpt.*'], exclude=['tests'])
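# Added note: find_packages() above collects the 'h2ogpt' package and its subpackages; the
# package_data / exclude_package_data arguments below bundle the speaker-embedding spkemb/*.npy
# files while keeping __pycache__ directories and models/README-template.md out of built distributions.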
setuptools.setup(
    name='h2ogpt',
    packages=packages,
    package_data={
        # If 'h2ogpt' is your package directory and 'spkemb' is directly inside it
        'h2ogpt': ['spkemb/*.npy'],
        # If 'spkemb' is inside 'src' which is inside 'h2ogpt'
        # Adjust the string according to your actual package structure
        'h2ogpt.src': ['spkemb/*.npy'],
    },
    exclude_package_data={
        'h2ogpt': [
            '**/__pycache__/**',
            'models/README-template.md',
        ],
    },
    version=version,
    license='https://opensource.org/license/apache-2-0/',
    description='',
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='H2O.ai',
    author_email='jon.mckinney@h2o.ai, arno@h2o.ai',
    url='https://github.com/h2oai/h2ogpt',
    download_url='',
    keywords=['LLM', 'AI'],
    install_requires=install_requires,
    extras_require={
        'cpu': install_cpu,
        'cuda': install_cuda,
        'TRAINING': install_extra_training,
        'WIKI_EXTRA': install_wiki_extra,
        'local-inference': ['unstructured[local-inference]>=0.12.5,<0.13'],
    },
    classifiers=[],
    python_requires='>=3.10',
    entry_points={
        'console_scripts': [
            'h2ogpt_finetune=h2ogpt.finetune:entrypoint_main',
            'h2ogpt_generate=h2ogpt.generate:entrypoint_main',
        ],
    },
)
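
# Illustrative usage (added note, not in the original file): with the extras defined above, an install
# from a source checkout might look like
#   pip install .                  # base requirements only
#   pip install ".[cuda]"          # add the GPU-only optional requirements
#   pip install ".[cpu]"           # add the CPU-only optional requirements
#   pip install ".[TRAINING]"      # add the fine-tuning extras
# and the console_scripts entry points expose the `h2ogpt_finetune` and `h2ogpt_generate` commands
# after installation. Exact contents depend on the reqs_optional/*.txt files in the checkout.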