p_utils.py
# Copyright 2021 Samsung Electronics Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..models import *


def get_some_data(train_dataloader, num_batches, device):
    """Draw `num_batches` batches from the loader and concatenate them into a
    single (inputs, targets) pair on `device`."""
    traindata = []
    dataloader_iter = iter(train_dataloader)
    for _ in range(num_batches):
        traindata.append(next(dataloader_iter))
    inputs = torch.cat([a for a, _ in traindata])
    targets = torch.cat([b for _, b in traindata])
    inputs = inputs.to(device)
    targets = targets.to(device)
    return inputs, targets

def get_some_data_grasp(train_dataloader, num_classes, samples_per_class, device):
    """Build a class-balanced batch containing `samples_per_class` examples for
    each of the `num_classes` classes, drawn from the loader, on `device`."""
    datas = [[] for _ in range(num_classes)]
    labels = [[] for _ in range(num_classes)]
    mark = dict()
    dataloader_iter = iter(train_dataloader)
    while True:
        inputs, targets = next(dataloader_iter)
        for idx in range(inputs.shape[0]):
            x, y = inputs[idx:idx + 1], targets[idx:idx + 1]
            category = y.item()
            if len(datas[category]) == samples_per_class:
                mark[category] = True
                continue
            datas[category].append(x)
            labels[category].append(y)
        if len(mark) == num_classes:
            break
    x = torch.cat([torch.cat(_, 0) for _ in datas]).to(device)
    y = torch.cat([torch.cat(_) for _ in labels]).view(-1).to(device)
    return x, y
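
# Usage note (illustrative, not part of the original file): with a typical
# CIFAR-10-style DataLoader of batch size B, get_some_data(loader, 2, device)
# returns inputs of shape [2 * B, 3, 32, 32] and targets of shape [2 * B],
# while get_some_data_grasp(loader, num_classes=10, samples_per_class=5,
# device=device) returns a class-balanced batch of 10 * 5 = 50 samples.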

def get_layer_metric_array(net, metric, mode):
    """Apply `metric` to every Conv2d/Linear layer of `net` and return the
    results as a list. In 'channel' mode, layers flagged with `dont_ch_prune`
    are skipped."""
    metric_array = []
    for layer in net.modules():
        if mode == 'channel' and hasattr(layer, 'dont_ch_prune'):
            continue
        if isinstance(layer, (nn.Conv2d, nn.Linear)):
            metric_array.append(metric(layer))
    return metric_array

def reshape_elements(elements, shapes, device):
    """Broadcast per-layer values back to tensors: for each 1-D element `e`
    and shape `sh`, build a tensor of shape [len(e), *sh] in which every slice
    along dim 0 is filled with the corresponding value. Handles one level of
    nesting when `elements` is a list of lists."""
    def broadcast_val(elements, shapes):
        ret_grads = []
        for e, sh in zip(elements, shapes):
            # `sh` is expected to be a torch.Size (e.g. layer.weight.shape[1:])
            ret_grads.append(torch.stack([torch.Tensor(sh).fill_(v) for v in e], dim=0).to(device))
        return ret_grads

    if type(elements[0]) == list:
        outer = []
        for e, sh in zip(elements, shapes):
            outer.append(broadcast_val(e, sh))
        return outer
    else:
        return broadcast_val(elements, shapes)

def count_parameters(model):
    """Number of trainable parameters in `model`."""
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
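
For reference, here is a minimal usage sketch of the layer-level helpers. It is illustrative only: the model, the per-channel metric, and the import path are assumptions made for the example, not part of the original source.

# Hypothetical usage example (not from the original file). Adjust the import
# to wherever p_utils.py sits in your package layout.
import torch
import torch.nn as nn

from p_utils import count_parameters, get_layer_metric_array, reshape_elements

device = torch.device('cpu')
net = nn.Sequential(
    nn.Conv2d(3, 8, kernel_size=3, padding=1),
    nn.ReLU(),
    nn.Flatten(),
    nn.Linear(8 * 32 * 32, 10),
)

# One score per output channel of each Conv2d/Linear layer
# (mean absolute weight is just an illustrative metric).
per_channel = get_layer_metric_array(
    net,
    lambda l: l.weight.detach().abs().mean(dim=tuple(range(1, l.weight.dim()))),
    mode='param')

# Matching shapes with the output-channel dimension dropped (torch.Size objects).
shapes = get_layer_metric_array(net, lambda l: l.weight.shape[1:], mode='param')

# Broadcast each per-channel score back to a full weight-shaped tensor.
dense = reshape_elements(per_channel, shapes, device)

print(count_parameters(net))       # trainable parameter count
print([d.shape for d in dense])    # one tensor per layer, matching its weight shape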