LlamaAssistant.spec
# -*- mode: python -*-
# vim: ft=python
import os
import sys

from PyInstaller.utils.hooks import collect_data_files, get_package_paths

sys.setrecursionlimit(5000)  # required on Windows

# Collect llama.cpp: bundle the platform-specific shared libraries that
# llama-cpp-python loads at runtime, plus its regular data files.
package_path = get_package_paths('llama_cpp')[0]
datas = collect_data_files('llama_cpp')

if os.name == 'nt':  # Windows
    for lib in ["ggml", "llama", "llava"]:
        dll_path = os.path.join(package_path, 'llama_cpp', 'lib', f'{lib}.dll')
        datas.append((dll_path, 'llama_cpp/lib'))
elif sys.platform == 'darwin':  # macOS
    for lib in ["ggml", "llama", "llava"]:
        dylib_path = os.path.join(package_path, 'llama_cpp', 'lib', f'lib{lib}.dylib')
        datas.append((dylib_path, 'llama_cpp/lib'))
elif os.name == 'posix':  # Linux
    for lib in ["ggml", "llama", "llava"]:
        so_path = os.path.join(package_path, 'llama_cpp', 'lib', f'lib{lib}.so')
        datas.append((so_path, 'llama_cpp/lib'))
# Application resources shipped alongside the executable.
datas += [
    ('llama_assistant/resources/*.onnx', 'llama_assistant/resources'),
    ('llama_assistant/resources/*.png', 'llama_assistant/resources'),
]
a = Analysis(
    ['llama_assistant/main.py'],
    pathex=['llama_assistant'],
    binaries=[],
    datas=datas,
    # Modules imported dynamically at runtime, which PyInstaller's static
    # analysis cannot discover on its own (e.g. tiktoken's plugin packages).
    hiddenimports=["ffmpeg", "PyQt5", "tiktoken_ext.openai_public", "tiktoken_ext"],
    hookspath=[],
    runtime_hooks=[],
    excludes=[],
)
pyz = PYZ(a.pure, a.zipped_data)
exe = EXE(
    pyz,
    a.scripts,
    a.binaries,
    a.zipfiles,
    a.datas,
    name='LlamaAssistant',
    debug=False,
    strip=False,
    upx=False,
    runtime_tmpdir=None,
    console=False,
    icon='llama_assistant/resources/icon.ico',
)
# macOS only: wrap the executable in a .app bundle (skipped on other platforms).
app = BUNDLE(
    exe,
    name='LlamaAssistant.app',
    icon='llama_assistant/resources/icon.ico',
    bundle_identifier=None,
    info_plist={'NSHighResolutionCapable': 'True'},
)
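
# A typical build invocation, assuming PyInstaller is installed in the same
# environment as llama_assistant and llama_cpp:
#
#     pyinstaller LlamaAssistant.spec
#
# The EXE target is the final artifact on Windows and Linux; the BUNDLE step
# above produces LlamaAssistant.app and only takes effect on macOS.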