refine online demo #69

Merged
merged 5 commits into from Oct 17, 2024
Changes from all commits
3 changes: 2 additions & 1 deletion setup.py
@@ -22,7 +22,8 @@ def get_version():


 def build_cuda_extensions():
-    compute_capabilities = [70, 75, 80, 86, 89, 90]
+    compute_capabilities = [80]
+    # compute_capabilities = [70, 75, 80, 86, 89, 90]
     arch_flags = []
     TORCH_CUDA_ARCH_LIST = os.getenv("TORCH_CUDA_ARCH_LIST", None)
     if TORCH_CUDA_ARCH_LIST is None:
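With this change the demo build compiles kernels only for compute capability 8.0 (A100-class GPUs) unless TORCH_CUDA_ARCH_LIST overrides the list. As a minimal sketch of what such a capability list conventionally expands to, here is a hypothetical helper following the standard nvcc -gencode convention; the repo's actual flag construction below this hunk may differ:

```python
# Hypothetical helper; setup.py's real logic may format the flags differently.
def arch_flags_for(compute_capabilities):
    flags = []
    for cap in compute_capabilities:
        # 80 -> compute capability 8.0 -> compute_80 / sm_80
        flags.append(f"-gencode=arch=compute_{cap},code=sm_{cap}")
    return flags

print(arch_flags_for([80]))  # ['-gencode=arch=compute_80,code=sm_80']
```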
47 changes: 35 additions & 12 deletions vptq/app.py
@@ -15,21 +15,33 @@

 models = [
     {
-        "name": "VPTQ-community/Meta-Llama-3.1-70B-Instruct-v16-k65536-65536-woft",
-        "bits": "2 bits"
+        "name": "VPTQ-community/Meta-Llama-3.1-70B-Instruct-v8-k65536-65536-woft",
+        "bits": "4 bits"
     },
     {
         "name": "VPTQ-community/Meta-Llama-3.1-70B-Instruct-v8-k65536-256-woft",
         "bits": "3 bits"
     },
     {
-        "name": "VPTQ-community/Meta-Llama-3.1-70B-Instruct-v8-k65536-65536-woft",
-        "bits": "4 bits"
+        "name": "VPTQ-community/Meta-Llama-3.1-70B-Instruct-v16-k65536-65536-woft",
+        "bits": "2 bits"
     },
     {
         "name": "VPTQ-community/Meta-Llama-3.1-70B-Instruct-v8-k32768-0-woft",
         "bits": "1.875 bits"
     },
+    {
+        "name": "VPTQ-community/Meta-Llama-3.1-8B-Instruct-v8-k65536-65536-woft",
+        "bits": "4 bits"
+    },
+    {
+        "name": "VPTQ-community/Meta-Llama-3.1-8B-Instruct-v8-k65536-256-woft",
+        "bits": "3 bits"
+    },
+    {
+        "name": "VPTQ-community/Meta-Llama-3.1-8B-Instruct-v12-k65536-4096-woft",
+        "bits": "2.3 bits"
+    },
     {
         "name": "VPTQ-community/Qwen2.5-72B-Instruct-v8-k65536-65536-woft",
         "bits": "4 bits"
@@ -42,6 +54,10 @@
         "name": "VPTQ-community/Qwen2.5-72B-Instruct-v16-k65536-65536-woft",
         "bits": "2 bits"
     },
+    {
+        "name": "VPTQ-community/Qwen2.5-72B-Instruct-v16-k65536-32768-woft",
+        "bits": "1.94 bits"
+    },
 ]

 model_choices = [f"{model['name']} ({model['bits']})" for model in models]
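The respond() handler further down resolves the selected display label back to a repo id via display_to_model. That mapping is built outside this hunk; a minimal sketch of how it is presumably constructed from the same list:

```python
# Assumed inverse mapping from display label back to repo id;
# respond() indexes display_to_model with the selected label.
models = [
    {"name": "VPTQ-community/Meta-Llama-3.1-70B-Instruct-v8-k65536-65536-woft", "bits": "4 bits"},
]
model_choices = [f"{m['name']} ({m['bits']})" for m in models]
display_to_model = {f"{m['name']} ({m['bits']})": m["name"] for m in models}

assert display_to_model[model_choices[0]] == models[0]["name"]
```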
@@ -59,10 +75,8 @@ def download_models_in_background():
         download_model(model)


-download_thread = threading.Thread(target=download_models_in_background)
-download_thread.start()
-
-loaded_models = {}
+loaded_model = None
+loaded_model_name = None


 def respond(
@@ -76,12 +90,16 @@ def respond(
 ):
     model_name = display_to_model[selected_model_display_label]

+    global loaded_model
+    global loaded_model_name
+
     # Check if the model is already loaded
-    if model_name not in loaded_models:
+    if model_name != loaded_model_name:
         # Load and store the model in the cache
-        loaded_models[model_name] = get_chat_loop_generator(model_name)
+        loaded_model = get_chat_loop_generator(model_name)
+        loaded_model_name = model_name

-    chat_completion = loaded_models[model_name]
+    chat_completion = loaded_model

     messages = [{"role": "system", "content": system_message}]
@@ -118,7 +136,8 @@ def respond(
     def update_chart():
         return _update_charts(chart_height=200)

-    gpu_chart = gr.Plot(update_chart, every=0.1)  # update every 0.1 seconds
+    # update every 0.1 seconds
+    gpu_chart = gr.Plot(update_chart, every=0.1)

     with gr.Column():
         chat_interface = gr.ChatInterface(
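For reference, passing a callable plus every to gr.Plot makes Gradio re-run that callable on a timer; a self-contained sketch under that assumption (the plot contents are illustrative stand-ins for _update_charts):

```python
import random

import gradio as gr
import matplotlib.pyplot as plt

def make_chart():
    # Stand-in for _update_charts: one bar of fake GPU utilization.
    fig, ax = plt.subplots(figsize=(4, 2))
    ax.bar(["GPU 0"], [random.randint(0, 100)])
    ax.set_ylim(0, 100)
    ax.set_ylabel("util %")
    return fig

with gr.Blocks() as demo:
    gr.Plot(make_chart, every=0.1)  # re-render every 0.1 seconds

if __name__ == "__main__":
    demo.launch()
```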
@@ -144,5 +163,9 @@ def update_chart():

 if __name__ == "__main__":
     share = os.getenv("SHARE_LINK", None) in ["1", "true", "True"]
+    download = os.getenv("DOWNLOAD_MODEL", None) in ["1", "true", "True"]
+    if download:
+        download_thread = threading.Thread(target=download_models_in_background)
+        download_thread.start()
     demo.launch(share=share)
     disable_gpu_info()
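Background model downloading is now opt-in rather than starting at import time, and both launch options follow the same environment-variable convention. A tiny helper like the hypothetical env_flag below captures that convention (the PR itself keeps the checks inline); a shell invocation enabling both flags would look like SHARE_LINK=1 DOWNLOAD_MODEL=1 python -m vptq.app, with the entry point assumed:

```python
import os

def env_flag(name: str) -> bool:
    # Same truthiness convention the demo uses: "1", "true", or "True" enable it.
    return os.getenv(name, None) in ["1", "true", "True"]

share = env_flag("SHARE_LINK")
download = env_flag("DOWNLOAD_MODEL")
print(share, download)
```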