[Core] Refactor gptq_marlin ops (vllm-project#4466)
jikunshang authored Apr 30, 2024
1 parent e62470d commit 402bd7a
Showing 2 changed files with 17 additions and 1 deletion.
16 changes: 16 additions & 0 deletions vllm/_custom_ops.py

@@ -167,6 +167,22 @@ def aqlm_dequant(codes: torch.Tensor, codebooks: torch.Tensor,
     return vllm_ops.aqlm_dequant(codes, codebooks, codebook_partition_sizes)
 
 
+# gptq_marlin
+def gptq_marlin_repack(b_q_weight: torch.Tensor, perm: torch.Tensor,
+                       size_k: int, size_n: int) -> torch.Tensor:
+    return vllm_ops.gptq_marlin_repack(b_q_weight, perm, size_k, size_n)
+
+
+def gptq_marlin_gemm(a: torch.Tensor, b_q_weight: torch.Tensor,
+                     b_scales: torch.Tensor, g_idx: torch.Tensor,
+                     perm: torch.Tensor, workspace: torch.Tensor, size_m: int,
+                     size_n: int, size_k: int,
+                     is_k_full: bool) -> torch.Tensor:
+    return vllm_ops.gptq_marlin_gemm(a, b_q_weight, b_scales, g_idx, perm,
+                                     workspace, size_m, size_n, size_k,
+                                     is_k_full)
+
+
 # fp8
 def scaled_fp8_quant(
     input: torch.Tensor,
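For context, a minimal sketch of the call path these wrappers enable. marlin_matmul is a hypothetical helper, not part of this commit, and the quantized-weight arguments (b_q_weight, b_scales, g_idx, perm, workspace) are assumed to come from a Marlin-format GPTQ checkpoint:

import torch

from vllm import _custom_ops as ops


def marlin_matmul(a: torch.Tensor, b_q_weight: torch.Tensor,
                  b_scales: torch.Tensor, g_idx: torch.Tensor,
                  perm: torch.Tensor, workspace: torch.Tensor,
                  size_m: int, size_n: int, size_k: int) -> torch.Tensor:
    # Dispatches through the Python-level wrapper added in this commit,
    # which forwards to the compiled vllm._C kernel.
    return ops.gptq_marlin_gemm(a, b_q_weight, b_scales, g_idx, perm,
                                workspace, size_m, size_n, size_k,
                                is_k_full=True)

Centralizing the wrappers in _custom_ops.py also gives a single Python-level surface for these kernels, rather than scattering raw extension calls through the model layers.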
2 changes: 1 addition & 1 deletion vllm/model_executor/layers/quantization/gptq_marlin.py

@@ -6,7 +6,7 @@
 import torch
 from torch.nn.parameter import Parameter
 
-from vllm._C import ops
+from vllm import _custom_ops as ops
 from vllm.model_executor.layers.linear import (LinearBase, LinearMethodBase,
                                                set_weight_attrs)
 from vllm.model_executor.layers.quantization.base_config import (
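Because the new module is imported under the same alias, every call site in this file is untouched: ops.gptq_marlin_repack(...) and ops.gptq_marlin_gemm(...) now resolve to the pure-Python wrappers in vllm/_custom_ops.py instead of binding directly to the compiled extension. A simplified sketch of the resulting indirection (the exact import inside _custom_ops.py may differ from this):

# vllm/_custom_ops.py (simplified): a pure-Python shim over the native extension.
from vllm._C import ops as vllm_ops  # compiled CUDA/C++ kernels


def gptq_marlin_repack(b_q_weight, perm, size_k, size_n):
    # Thin forwarding wrapper: callers get a stable Python import path
    # that does not depend on how the native extension is built or named.
    return vllm_ops.gptq_marlin_repack(b_q_weight, perm, size_k, size_n)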