Add Llama.cpp Support #183

Closed · wants to merge 9 commits

12 changes: 7 additions & 5 deletions exo/api/chatgpt_api.py
@@ -220,11 +220,13 @@ async def handle_post_chat_completions(self, request):
{"detail": f"Unsupported model: {chat_request.model} with inference engine {self.inference_engine_classname}. Supported models for this engine: {supported_models}"},
status=400,
)

-    tokenizer = await resolve_tokenizer(shard.model_id)
-    if DEBUG >= 4: print(f"Resolved tokenizer: {tokenizer}")
-
-    prompt, image_str = build_prompt(tokenizer, chat_request.messages)
+    if self.inference_engine_classname != "LlamaCppDynamicShardEngine":
+      tokenizer = await resolve_tokenizer(shard.model_id)
+      if DEBUG >= 4: print(f"Resolved tokenizer: {tokenizer}")
+      prompt, image_str = build_prompt(tokenizer, chat_request.messages)
+    else:
+      # just pass the default prompt for now
+      prompt = chat_request.messages[0].content
+      image_str = None
request_id = str(uuid.uuid4())
if self.on_chat_completion_request:
try:
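
Why the tokenizer step can be skipped here: a GGUF file carries its own tokenizer and chat template, so llama-cpp-python can format and tokenize messages itself. A minimal sketch of that behavior, using an illustrative local model path (not a file from this PR):

from llama_cpp import Llama

# Hypothetical GGUF path; llama.cpp reads the tokenizer and chat template
# embedded in the file, so no Hugging Face tokenizer resolution is needed.
llm = Llama(model_path="./Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf", n_ctx=8192)

out = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Hello!"}],
    max_tokens=32,
)
print(out["choices"][0]["message"]["content"])
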
6 changes: 4 additions & 2 deletions exo/download/hf/hf_helpers.py
@@ -404,6 +404,8 @@ def get_allow_patterns(weight_map: Dict[str, str], shard: Shard) -> List[str]:
elif shard.is_last_layer():
shard_specific_patterns.add(sorted_file_names[-1])
else:
-    shard_specific_patterns = set("*.safetensors")
+    shard_specific_patterns = ["*.safetensors"]
if DEBUG >= 2: print(f"get_allow_patterns {weight_map=} {shard=} {shard_specific_patterns=}")
-  return list(default_patterns | shard_specific_patterns)
+  allowed_patterns = list(default_patterns)
+  allowed_patterns.extend(shard_specific_patterns)
+  return list(set(allowed_patterns))
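
The deleted line hides a subtle bug worth calling out: set() over a string iterates its characters, so the old pattern set could never match a filename. A quick REPL illustration:

>>> set("*.safetensors")   # iterates characters, not patterns (order varies)
{'*', '.', 'a', 'e', 'f', 'n', 'o', 'r', 's', 't'}
>>> ["*.safetensors"]      # the fix: a single glob pattern
['*.safetensors']
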
5 changes: 5 additions & 0 deletions exo/inference/inference_engine.py
@@ -1,3 +1,4 @@
+from llama_cpp.llama import Llama
import numpy as np
import os

@@ -27,5 +28,9 @@ def get_inference_engine(inference_engine_name: str, shard_downloader: 'ShardDownloader'):
tinygrad.helpers.DEBUG.value = int(os.getenv("TINYGRAD_DEBUG", default="0"))

return TinygradDynamicShardInferenceEngine(shard_downloader)
+  elif inference_engine_name == "llama_cpp":
+    from exo.inference.llama_cpp.inference import LlamaCppDynamicShardEngine
+    return LlamaCppDynamicShardEngine(shard_downloader)
+
else:
raise ValueError(f"Inference engine {inference_engine_name} not supported")
1 change: 1 addition & 0 deletions exo/inference/llama_cpp/__init__.py
@@ -0,0 +1 @@

24 changes: 24 additions & 0 deletions exo/inference/llama_cpp/inference.py
@@ -0,0 +1,24 @@
import numpy as np
from typing import Optional, Tuple
from llama_cpp import Llama
from exo.inference.shard import Shard
from exo.inference.inference_engine import InferenceEngine
from exo.download.shard_download import ShardDownloader

class LlamaCppDynamicShardEngine(InferenceEngine):
def __init__(self, shard_downloader: ShardDownloader):
self.shard = None
self.shard_downloader = shard_downloader

async def infer_prompt(self, request_id: str, shard: Shard, prompt: str, image_str: Optional[str] = None, inference_state: Optional[str] = None) -> Tuple[np.ndarray, str, bool]:
await self.ensure_shard(shard)

async def infer_tensor(self, request_id: str, shard: Shard, input_data: np.ndarray, inference_state: Optional[str] = None) -> Tuple[np.ndarray, str, bool]:
await self.ensure_shard(shard)

async def ensure_shard(self, shard: Shard):
if self.shard == shard:
return

model_path = await self.shard_downloader.ensure_shard(shard)
self.shard = shard
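
As committed, both infer_prompt and infer_tensor are stubs: they ensure the shard is downloaded but never load or run a model, so neither can yet satisfy its Tuple[np.ndarray, str, bool] return type. One way infer_prompt might be completed with llama-cpp-python; the self.model construction, the single-token step, and every parameter below are illustrative assumptions, not code from this PR:

# Sketch only. Assumes ensure_shard also builds the model, e.g.
#   self.model = Llama(model_path=str(model_path), n_ctx=4096)
# and that the whole model runs on one node (no real layer sharding yet).
async def infer_prompt(self, request_id, shard, prompt, image_str=None, inference_state=None):
    await self.ensure_shard(shard)
    out = self.model(prompt, max_tokens=1)                   # one decode step
    text = out["choices"][0]["text"]
    finished = out["choices"][0]["finish_reason"] == "stop"
    tokens = self.model.tokenize(text.encode("utf-8"), add_bos=False)
    return np.array(tokens), "", finished
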
4 changes: 3 additions & 1 deletion exo/models.py
@@ -5,6 +5,8 @@
"llama-3.1-8b": {
"MLXDynamicShardInferenceEngine": Shard(model_id="mlx-community/Meta-Llama-3.1-8B-Instruct-4bit", start_layer=0, end_layer=0, n_layers=32),
"TinygradDynamicShardInferenceEngine": Shard(model_id="mlabonne/Meta-Llama-3.1-8B-Instruct-abliterated", start_layer=0, end_layer=0, n_layers=32),
"LlamaCppDynamicShardEngine": Shard(model_id="bartowski/Meta-Llama-3.1-8B-Instruct-GGUF", start_layer=0, end_layer=0, n_layers=32)

},
"llama-3.1-70b": {
"MLXDynamicShardInferenceEngine": Shard(model_id="mlx-community/Meta-Llama-3.1-70B-Instruct-4bit", start_layer=0, end_layer=0, n_layers=80),
@@ -21,7 +23,7 @@
},
"llama-3-70b": {
"MLXDynamicShardInferenceEngine": Shard(model_id="mlx-community/Meta-Llama-3-70B-Instruct-4bit", start_layer=0, end_layer=0, n_layers=80),
"TinygradDynamicShardInferenceEngine": Shard(model_id="TriAiExperiments/SFR-Iterative-DPO-LLaMA-3-70B-R", start_layer=0, end_layer=0, n_layers=80),
"TinygradDynamicShardInferenceEngine": Shard(model_id="", start_layer=0, end_layer=0, n_layers=80),
},
### mistral
"mistral-nemo": {"MLXDynamicShardInferenceEngine": Shard(model_id="mlx-community/Mistral-Nemo-Instruct-2407-4bit", start_layer=0, end_layer=0, n_layers=40),},
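
The new entry maps llama-3.1-8b on this engine to bartowski's GGUF repo. Assuming the enclosing dict is exo's model_base_shards table (the name used in exo/models.py around this time), resolution would look like:

from exo.models import model_base_shards  # dict name assumed from exo/models.py

shard = model_base_shards["llama-3.1-8b"]["LlamaCppDynamicShardEngine"]
print(shard.model_id)  # bartowski/Meta-Llama-3.1-8B-Instruct-GGUF

Separately, blanking the tinygrad model_id for llama-3-70b above looks unintentional: an empty model_id would break that engine/model pair.
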
4 changes: 4 additions & 0 deletions pyrightconfig.json
@@ -0,0 +1,4 @@
{
"venvPath": ".",
"venv": ".venv"
}
1 change: 1 addition & 0 deletions setup.py
@@ -29,6 +29,7 @@
"tqdm==4.66.4",
"transformers==4.43.3",
"uuid==1.30",
"llama-cpp-python==0.2.89",
"tinygrad @ git+https://github.com/tinygrad/tinygrad.git@639af3f823cf242a1945dc24183e52a9df0af2b7",
]

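A quick smoke test that the pinned dependency installs and imports (hardware-specific build flags for llama-cpp-python, e.g. Metal or CUDA, are a separate concern not covered by this pin):

# Verify the binding is importable and matches the pin.
import llama_cpp
print(llama_cpp.__version__)  # expect 0.2.89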