Revert "Small fixes for torchao quant" (#2493)
merrymercy authored Dec 16, 2024
1 parent 9cd9dc8 commit ba36b55
Showing 2 changed files with 5 additions and 6 deletions.
python/sglang/srt/layers/torchao_utils.py: 1 addition, 2 deletions

@@ -26,12 +26,11 @@ def apply_torchao_config_to_model(
         quantize_,
     )
     from torchao.quantization.observer import PerRow, PerTensor
-    from torchao.quantization.quant_api import _is_linear
 
     if filter_fn is None:
 
         def filter_fn(module, fqn):
-            return _is_linear(module) and "proj" in fqn
+            return "proj" in fqn
 
     if torchao_config == "" or torchao_config is None:
         return model
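For context, apply_torchao_config_to_model hands this filter_fn to torchao's quantize_ call, so after the revert a module is quantized whenever its fully qualified name contains "proj", without the extra _is_linear check. A minimal sketch of that selection pattern, assuming torchao's quantize_ and int8_weight_only APIs and a hypothetical ToyBlock model (not code from this commit):

import torch
from torchao.quantization import int8_weight_only, quantize_

class ToyBlock(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.q_proj = torch.nn.Linear(64, 64)  # fqn contains "proj" -> selected
        self.gate = torch.nn.Linear(64, 64)    # no "proj" in fqn -> left unquantized

    def forward(self, x):
        return self.gate(self.q_proj(x))

def filter_fn(module, fqn):
    # Same predicate as the reverted filter_fn: match on the module's name only.
    return "proj" in fqn

model = ToyBlock()
quantize_(model, int8_weight_only(), filter_fn=filter_fn)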
python/sglang/srt/model_executor/model_runner.py: 4 additions, 4 deletions

@@ -157,10 +157,6 @@ def __init__(
         self.sampler = Sampler()
         self.load_model()
 
-        apply_torchao_config_to_model(
-            self.model, global_server_args_dict["torchao_config"]
-        )
-
         # Apply torch TP if the model supports it
         supports_torch_tp = getattr(self.model, "supports_torch_tp", False)
         if self.tp_size > 1 and supports_torch_tp:
@@ -169,6 +165,10 @@ def __init__(
         else:
             self.torch_tp_applied = False
 
+        apply_torchao_config_to_model(
+            self.model, global_server_args_dict["torchao_config"]
+        )
+
         # Init memory pool and attention backends
         if server_args.lora_paths is not None:
             self.init_lora_manager()
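This hunk restores the original ordering in ModelRunner.__init__: the torch tensor-parallel branch runs (or is skipped) first, and the torchao config is applied to the model afterwards. Based only on the call shape visible in this diff, the helper can also be exercised directly; a hedged sketch with a stand-in module, using the empty-config path that the diff shows returning the model unchanged:

import torch
from sglang.srt.layers.torchao_utils import apply_torchao_config_to_model

model = torch.nn.Linear(64, 64)  # stand-in for the runner's loaded model
# An empty (or None) torchao_config is a no-op and returns the model as-is.
model = apply_torchao_config_to_model(model, "")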
