# Based on LoRA-ViT: https://github.com/JamesQFreeman/LoRA-ViT/blob/main/lora.py
# Modified by Haoran Wang.
import math

import torch
import torch.nn as nn
from safetensors import safe_open
from safetensors.torch import save_file
from timm.models.vision_transformer import VisionTransformer as timm_ViT
from torch import Tensor
from torch.nn.parameter import Parameter


class _LoRA_qkv_timm(nn.Module):
    """LoRA wrapper around the fused qkv projection of a timm attention block.

    In timm it is implemented as
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)
    """

    def __init__(
        self,
        qkv: nn.Module,
        linear_a_q: nn.Module,
        linear_b_q: nn.Module,
        linear_a_k: nn.Module,
        linear_b_k: nn.Module,
        linear_a_v: nn.Module,
        linear_b_v: nn.Module,
    ):
        super().__init__()
        self.qkv = qkv
        self.linear_a_q = linear_a_q
        self.linear_b_q = linear_b_q
        self.linear_a_k = linear_a_k
        self.linear_b_k = linear_b_k
        self.linear_a_v = linear_a_v
        self.linear_b_v = linear_b_v
        self.dim = qkv.in_features
        self.w_identity = torch.eye(qkv.in_features)

    def forward(self, x):
        qkv = self.qkv(x)  # B, N, 3 * org_C
        # Low-rank updates B(A(x)) for each of q, k and v.
        new_q = self.linear_b_q(self.linear_a_q(x))
        new_k = self.linear_b_k(self.linear_a_k(x))
        new_v = self.linear_b_v(self.linear_a_v(x))
        # The fused output is laid out as [q | k | v] along the last dimension.
        qkv[:, :, :self.dim] += new_q
        qkv[:, :, self.dim:-self.dim] += new_k
        qkv[:, :, -self.dim:] += new_v
        return qkv
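

# The function below is not part of the original file; it is a minimal sketch
# (with a hypothetical name) illustrating why the slicing above is safe: with
# the LoRA "B" weights zeroed, as in the standard LoRA initialization, the
# wrapped qkv module reproduces the frozen projection exactly.
def _demo_lora_qkv_is_identity_at_init(dim: int = 8, r: int = 2) -> None:
    base_qkv = nn.Linear(dim, dim * 3, bias=True)
    a_q, a_k, a_v = (nn.Linear(dim, r, bias=False) for _ in range(3))
    b_q, b_k, b_v = (nn.Linear(r, dim, bias=False) for _ in range(3))
    for b in (b_q, b_k, b_v):
        nn.init.zeros_(b.weight)  # zero "B" makes each low-rank update a no-op
    lora_qkv = _LoRA_qkv_timm(base_qkv, a_q, b_q, a_k, b_k, a_v, b_v)
    x = torch.randn(2, 5, dim)  # (batch, tokens, dim)
    with torch.no_grad():
        assert torch.allclose(lora_qkv(x), base_qkv(x))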

class LoRA_ViT_timm(nn.Module):
    """Wraps a timm VisionTransformer and adds LoRA adapters of rank ``r`` to the
    q, k and v projections of the selected blocks. The backbone is frozen; only
    the adapters remain trainable. ``lora_layer`` selects which block indices to
    adapt (default: all blocks).
    """

    def __init__(self, vit_model: timm_ViT, r: int, lora_layer=None):
        super().__init__()
        assert r > 0
        if lora_layer is not None:
            self.lora_layer = lora_layer
        else:
            self.lora_layer = list(range(len(vit_model.blocks)))
        # Storage for the LoRA A/B linear layers, so we can initialize them or
        # load saved weights later.
        self.w_As = []
        self.w_Bs = []
        # Freeze the backbone first.
        for param in vit_model.parameters():
            param.requires_grad = False
        # Here we do the surgery: replace each selected block's qkv projection
        # with a LoRA-wrapped version.
        for t_layer_i, blk in enumerate(vit_model.blocks):
            # Skip blocks that were not selected for LoRA.
            if t_layer_i not in self.lora_layer:
                continue
            w_qkv_linear = blk.attn.qkv
            self.dim = w_qkv_linear.in_features
            w_a_linear_q = nn.Linear(self.dim, r, bias=False)
            w_b_linear_q = nn.Linear(r, self.dim, bias=False)
            w_a_linear_k = nn.Linear(self.dim, r, bias=False)
            w_b_linear_k = nn.Linear(r, self.dim, bias=False)
            w_a_linear_v = nn.Linear(self.dim, r, bias=False)
            w_b_linear_v = nn.Linear(r, self.dim, bias=False)
            self.w_As.extend([w_a_linear_q, w_a_linear_k, w_a_linear_v])
            self.w_Bs.extend([w_b_linear_q, w_b_linear_k, w_b_linear_v])
            blk.attn.qkv = _LoRA_qkv_timm(
                w_qkv_linear,
                w_a_linear_q,
                w_b_linear_q,
                w_a_linear_k,
                w_b_linear_k,
                w_a_linear_v,
                w_b_linear_v,
            )
        self.reset_parameters()
        self.lora_vit = vit_model

    def save_fc_parameters(self, filename: str) -> None:
        r"""Save the classification-head weight. Only safetensors is supported;
        ``pip install safetensors`` if you do not have it installed yet.
        """
        assert filename.endswith(".safetensors")
        _in = self.lora_vit.head.in_features
        _out = self.lora_vit.head.out_features
        fc_tensors = {f"fc_{_in}in_{_out}out": self.lora_vit.head.weight}
        save_file(fc_tensors, filename)

    def load_fc_parameters(self, filename: str) -> None:
        r"""Load the classification-head weight. Only safetensors is supported;
        ``pip install safetensors`` if you do not have it installed yet.
        """
        assert filename.endswith(".safetensors")
        _in = self.lora_vit.head.in_features
        _out = self.lora_vit.head.out_features
        with safe_open(filename, framework="pt") as f:
            saved_key = f"fc_{_in}in_{_out}out"
            try:
                saved_tensor = f.get_tensor(saved_key)
                self.lora_vit.head.weight = Parameter(saved_tensor)
            except ValueError:
                print("This fc weight does not match this model.")

    def save_lora_parameters(self, filename: str) -> None:
        r"""Save both the LoRA and the fc parameters. Only safetensors is
        supported; ``pip install safetensors`` if you do not have it installed yet.
        """
        assert filename.endswith(".safetensors")
        num_layer = len(self.w_As)  # three A (and B) matrices per adapted block: q, k, v
        a_tensors = {f"w_a_{i:03d}": self.w_As[i].weight for i in range(num_layer)}
        b_tensors = {f"w_b_{i:03d}": self.w_Bs[i].weight for i in range(num_layer)}
        _in = self.lora_vit.head.in_features
        _out = self.lora_vit.head.out_features
        fc_tensors = {f"fc_{_in}in_{_out}out": self.lora_vit.head.weight}
        merged_dict = {**a_tensors, **b_tensors, **fc_tensors}
        save_file(merged_dict, filename)

    def load_lora_parameters(self, filename: str) -> None:
        r"""Load both the LoRA and the fc parameters. Only safetensors is
        supported; ``pip install safetensors`` if you do not have it installed yet.
        """
        assert filename.endswith(".safetensors")
        with safe_open(filename, framework="pt") as f:
            for i, w_A_linear in enumerate(self.w_As):
                saved_key = f"w_a_{i:03d}"
                saved_tensor = f.get_tensor(saved_key)
                w_A_linear.weight = Parameter(saved_tensor)
            for i, w_B_linear in enumerate(self.w_Bs):
                saved_key = f"w_b_{i:03d}"
                saved_tensor = f.get_tensor(saved_key)
                w_B_linear.weight = Parameter(saved_tensor)
            _in = self.lora_vit.head.in_features
            _out = self.lora_vit.head.out_features
            saved_key = f"fc_{_in}in_{_out}out"
            try:
                saved_tensor = f.get_tensor(saved_key)
                self.lora_vit.head.weight = Parameter(saved_tensor)
            except ValueError:
                print("This fc weight does not match this model.")

    def reset_parameters(self) -> None:
        # Standard LoRA initialization: Kaiming-uniform for A, zeros for B, so
        # the adapters start as a no-op and the wrapped model matches the backbone.
        for w_A in self.w_As:
            nn.init.kaiming_uniform_(w_A.weight, a=math.sqrt(5))
        for w_B in self.w_Bs:
            nn.init.zeros_(w_B.weight)

    def forward(self, x: Tensor) -> Tensor:
        return self.lora_vit(x)
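

# A minimal usage sketch, not part of the original file. The timm model name,
# rank, class count and output filename below are illustrative assumptions:
# wrap a timm ViT, confirm that only the LoRA adapters are trainable, run a
# forward pass, and save the adapter weights.
if __name__ == "__main__":
    import timm

    vit = timm.create_model("vit_base_patch16_224", pretrained=False, num_classes=10)
    lora_vit = LoRA_ViT_timm(vit, r=4)

    trainable = sum(p.numel() for p in lora_vit.parameters() if p.requires_grad)
    total = sum(p.numel() for p in lora_vit.parameters())
    print(f"trainable params: {trainable} / {total}")

    logits = lora_vit(torch.randn(2, 3, 224, 224))
    print(logits.shape)  # expected: torch.Size([2, 10])

    lora_vit.save_lora_parameters("lora_demo.safetensors")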