forked from vllm-project/vllm
* llama support
* flash_attention
* sharded
* expand
* fix: remove redundant info
* change main
* llama and opt model supported

Co-authored-by: Shao Siyang FYP PDCL <shaosy@scsehg.cm.cluster>
Co-authored-by: lairuiqi <lrq619@outlook.com>
Co-authored-by: LaiRuiqi <58351056+lrq619@users.noreply.github.com>
Commit 40f0d62 (1 parent: ec6642c)
Showing 11 changed files with 674 additions and 105 deletions.
@@ -0,0 +1,58 @@
from vllm import LLM, SamplingParams
from vllm.liquid.request import LiquidRequest, LiquidType
# from vllm import EngineArgs, LLMEngine
import asyncio
import torch

import os

model = "meta-llama/Meta-Llama-3-8B"
# model = "facebook/opt-6.7b"
# model_path = os.path.join("./models", model)


def main():
    llm = LLM(
        model,
        enforce_eager=True,
        # load_format="auto",
        tensor_parallel_size=2,
        # liquid_gpu_range = [0,1,2,3],
        # liquid_gpu_space = 32,
        # liquid_driver_gpu_id = 0,
        # liquid_total_num_shards = 4,
        gpu_memory_utilization=0.8,
    )
    sampling_params = SamplingParams(temperature=0, min_tokens=128, max_tokens=128)
    request_num = 1
    word = "what is LLM?"
    prompt = word
    inputs = [prompt for _ in range(request_num)]

    # for i in range(1):
    #     print(f"i: {i}")
    #     liquid_request = LiquidRequest(LiquidType.LIQUID_1_2)
    #     llm.do_liquid(liquid_request)
    #     # liquid_request = LiquidRequest(LiquidType.LIQUID_2_4)
    #     # llm.do_liquid(liquid_request)
    #     # liquid_request = LiquidRequest(LiquidType.LIQUID_4_2)
    #     # llm.do_liquid(liquid_request)
    #     liquid_request = LiquidRequest(LiquidType.LIQUID_2_1)
    #     llm.do_liquid(liquid_request)

    # print("liquid done")

    output = llm.generate(inputs, sampling_params=sampling_params)
    print(f"output: {output[0].outputs[0].text}")


if __name__ == '__main__':
    # torch.cuda.memory._record_memory_history(context="all", stacks="all")
    main()
    # torch.cuda.memory._dump_snapshot(f"./torch_mem_dump.pickle")
    # torch.cuda.memory._record_memory_history(enabled=None)
    # print(f"dumped finished!")