
Commit

support lora when kv_channel != hidden_size / num_heads (#9644)
cuichenx and suiyoubi authored Jul 8, 2024
Co-authored-by: Ao Tang <aot@nvidia.com>
1 parent 66c960e commit f790741
Showing 1 changed file with 1 addition and 1 deletion.
nemo/collections/nlp/parts/peft_config.py

@@ -170,7 +170,7 @@ def __init__(self, cfg):
 
         elif module == PEFT_MODULE_MAP["dense_module"]:
             adapter_cfg = self._create_lora_config(
-                cfg, lora_cfg, cfg.hidden_size, cfg.hidden_size, LoraDenseAttentionAdapterConfig
+                cfg, lora_cfg, projection_size, cfg.hidden_size, LoraDenseAttentionAdapterConfig
             )
             name_key_to_cfg[AdapterName.LORA_DENSE_ATTENTION_ADAPTER] = adapter_cfg
             name_key_to_mcore_mixins[AdapterName.LORA_DENSE_ATTENTION_ADAPTER] = [
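Why the one-line change matters: the attention output ("dense") projection takes the concatenated head outputs as input, so its input width is projection_size = kv_channels * num_attention_heads, not hidden_size. The two only coincide in the default case kv_channels == hidden_size / num_heads, so hard-coding cfg.hidden_size as the adapter's input dimension broke models that set kv_channels independently. Below is a minimal standalone sketch of the failure mode, assuming megatron-style config names; the LoRALinear class is hypothetical, not NeMo's adapter implementation.

import torch
import torch.nn as nn


class LoRALinear(nn.Module):
    """A plain LoRA adapter: base(x) + (alpha / rank) * B(A(x))."""

    def __init__(self, in_features: int, out_features: int, rank: int = 8, alpha: int = 16):
        super().__init__()
        self.base = nn.Linear(in_features, out_features, bias=False)
        self.lora_a = nn.Linear(in_features, rank, bias=False)
        self.lora_b = nn.Linear(rank, out_features, bias=False)
        nn.init.zeros_(self.lora_b.weight)  # adapter starts as a no-op
        self.scale = alpha / rank

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.base(x) + self.scale * self.lora_b(self.lora_a(x))


hidden_size = 1024
num_attention_heads = 16
kv_channels = 128  # != hidden_size // num_attention_heads (which is 64)
projection_size = kv_channels * num_attention_heads  # 2048

# The dense projection maps concatenated head outputs back to the model width:
# [batch, seq, projection_size] -> [batch, seq, hidden_size].
attn_output = torch.randn(2, 5, projection_size)

# Before the fix: an adapter built with in_features=hidden_size fails whenever
# kv_channels != hidden_size / num_attention_heads.
# broken = LoRALinear(hidden_size, hidden_size)
# broken(attn_output)  # -> shape mismatch (expects 1024, gets 2048)

# After the fix: in_features=projection_size matches the dense layer's input.
fixed = LoRALinear(projection_size, hidden_size)
print(fixed(attn_output).shape)  # torch.Size([2, 5, 1024])

With in_features set to projection_size, the low-rank path composes with the dense projection for any kv_channels setting, while the default configuration is unaffected since projection_size then equals hidden_size.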
