Skip to content

Commit

Permalink
Fix the order of apply_torchao_config: apply torchao quantization immediately after load_model, before torch tensor parallelism is applied
Browse files Browse the repository at this point in the history
  • Loading branch information
merrymercy authored Dec 17, 2024
1 parent 56198b4 commit 3060a0e
Showing 1 changed file with 5 additions and 4 deletions.
9 changes: 5 additions & 4 deletions python/sglang/srt/model_executor/model_runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -157,6 +157,11 @@ def __init__(
self.sampler = Sampler()
self.load_model()

# Apply torchao quantization
apply_torchao_config_to_model(
self.model, global_server_args_dict["torchao_config"]
)

# Apply torch TP if the model supports it
supports_torch_tp = getattr(self.model, "supports_torch_tp", False)
if self.tp_size > 1 and supports_torch_tp:
Expand All @@ -165,10 +170,6 @@ def __init__(
else:
self.torch_tp_applied = False

apply_torchao_config_to_model(
self.model, global_server_args_dict["torchao_config"]
)

# Init memory pool and attention backends
if server_args.lora_paths is not None:
self.init_lora_manager()
Expand Down

0 comments on commit 3060a0e

Please sign in to comment.