From 734d2e241f94b30f163f328f21900d39416707a4 Mon Sep 17 00:00:00 2001
From: Kunshang Ji
Date: Tue, 30 Apr 2024 18:30:58 +0800
Subject: [PATCH] refactor gptq_marlin ops

---
 vllm/_custom_ops.py                              | 16 ++++++++++++++++
 .../layers/quantization/gptq_marlin.py           |  2 +-
 2 files changed, 17 insertions(+), 1 deletion(-)

diff --git a/vllm/_custom_ops.py b/vllm/_custom_ops.py
index 5ba104bada7ac..4af8b09b1e16c 100644
--- a/vllm/_custom_ops.py
+++ b/vllm/_custom_ops.py
@@ -167,6 +167,22 @@ def aqlm_dequant(codes: torch.Tensor, codebooks: torch.Tensor,
     return vllm_ops.aqlm_dequant(codes, codebooks, codebook_partition_sizes)
 
 
+# gptq_marlin
+def gptq_marlin_repack(b_q_weight: torch.Tensor, perm: torch.Tensor,
+                       size_k: int, size_n: int) -> torch.Tensor:
+    return vllm_ops.gptq_marlin_repack(b_q_weight, perm, size_k, size_n)
+
+
+def gptq_marlin_gemm(a: torch.Tensor, b_q_weight: torch.Tensor,
+                     b_scales: torch.Tensor, g_idx: torch.Tensor,
+                     perm: torch.Tensor, workspace: torch.Tensor, size_m: int,
+                     size_n: int, size_k: int,
+                     is_k_full: bool) -> torch.Tensor:
+    return vllm_ops.gptq_marlin_gemm(a, b_q_weight, b_scales, g_idx, perm,
+                                     workspace, size_m, size_n, size_k,
+                                     is_k_full)
+
+
 # fp8
 def scaled_fp8_quant(
     input: torch.Tensor,
diff --git a/vllm/model_executor/layers/quantization/gptq_marlin.py b/vllm/model_executor/layers/quantization/gptq_marlin.py
index 7bff0e834483f..efbffa0878c4b 100644
--- a/vllm/model_executor/layers/quantization/gptq_marlin.py
+++ b/vllm/model_executor/layers/quantization/gptq_marlin.py
@@ -6,7 +6,7 @@
 import torch
 from torch.nn.parameter import Parameter
 
-from vllm._C import ops
+from vllm import _custom_ops as ops
 from vllm.model_executor.layers.linear import (LinearBase, LinearMethodBase,
                                                set_weight_attrs)
 from vllm.model_executor.layers.quantization.base_config import (
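
A minimal call sketch of the new Python-level wrappers, for reviewers. The
problem sizes, tensor shapes, group size, and workspace length below are
illustrative assumptions for a 4-bit GPTQ layout; they are not defined by
this patch, and real tensors come from a quantized checkpoint.

    import torch

    from vllm import _custom_ops as ops

    # Assumed problem sizes and quantization group size (hypothetical).
    size_m, size_n, size_k, group_size = 16, 4096, 4096, 128

    a = torch.randn(size_m, size_k, dtype=torch.float16, device="cuda")

    # Placeholders standing in for GPTQ checkpoint tensors: 4-bit weights
    # packed 8-per-int32 along k; empty perm/g_idx means no act-order here.
    b_q_weight = torch.zeros(size_k // 8, size_n, dtype=torch.int32,
                             device="cuda")
    b_scales = torch.ones(size_k // group_size, size_n,
                          dtype=torch.float16, device="cuda")
    perm = torch.empty(0, dtype=torch.int32, device="cuda")
    g_idx = torch.empty(0, dtype=torch.int32, device="cuda")
    workspace = torch.zeros(size_n // 64 * 16, dtype=torch.int32,
                            device="cuda")  # assumed sizing rule

    # Repack into the Marlin tile layout once, then reuse for every GEMM.
    marlin_q_w = ops.gptq_marlin_repack(b_q_weight, perm, size_k, size_n)
    out = ops.gptq_marlin_gemm(a, marlin_q_w, b_scales, g_idx, perm,
                               workspace, size_m, size_n, size_k,
                               is_k_full=True)  # (size_m, size_n) fp16

The point of the refactor is that callers import the pure-Python module
vllm._custom_ops instead of binding directly to the compiled vllm._C
extension, so the op surface can be documented and stubbed in one place.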