Skip to content
This repository has been archived by the owner on Sep 18, 2024. It is now read-only.

compression speedup: add init file #2063

Merged
merged 37 commits into from
Feb 15, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
37 commits
Select commit Hold shift + click to select a range
de817c2
update
QuanluZhang Jan 1, 2020
9e4a3d9
update
QuanluZhang Jan 10, 2020
1db91e3
Merge branch 'master' of https://github.com/Microsoft/nni into dev-co…
QuanluZhang Jan 13, 2020
e401f2b
update
QuanluZhang Jan 15, 2020
7ba30c3
update
QuanluZhang Jan 20, 2020
dc865fe
update
QuanluZhang Jan 21, 2020
10c0510
update
QuanluZhang Jan 21, 2020
df1dda7
update
QuanluZhang Jan 22, 2020
9680f3e
update
QuanluZhang Jan 24, 2020
e51f288
update
Jan 28, 2020
f830430
update
Jan 28, 2020
ab7f23d
update
Jan 28, 2020
98e75c2
passes eval result validation, but has a very small difference
Jan 29, 2020
ff413d1
add model_speedup.py
Jan 29, 2020
d83f190
update
Jan 30, 2020
ff7e79d
pass fpgm test
Jan 31, 2020
e1240fe
add doc for speedup
Jan 31, 2020
8d333f2
pass l1filter
Jan 31, 2020
b1b2b14
update
Jan 31, 2020
e988f19
update
Feb 1, 2020
b8da18d
Merge branch 'dev-pruner-dataparallel' of https://github.com/microsof…
Feb 5, 2020
12485c7
Merge branch 'dev-pruner-dataparallel' of https://github.com/microsof…
Feb 5, 2020
fbb6d48
remove test files
Feb 5, 2020
1ce3c72
update
Feb 5, 2020
4db78f7
update
Feb 5, 2020
3d51727
update
Feb 5, 2020
70d3b1e
add comments
Feb 5, 2020
c80c7a9
add comments
Feb 6, 2020
005a664
add comments
Feb 6, 2020
d11a54a
add comments
Feb 6, 2020
49e0de1
update
Feb 6, 2020
951b014
resolve comments
Feb 8, 2020
280fb1b
update doc
Feb 10, 2020
c96f8b1
Merge branch 'v1.4' of https://github.com/microsoft/nni into dev-comp…
Feb 15, 2020
4c47da7
add init file
Feb 15, 2020
553879b
remove doc
Feb 15, 2020
61be340
fix pylint
Feb 15, 2020
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Empty file.
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
# Licensed under the MIT license.

import torch
from .infer_shape import CoarseMask, ModuleMasks
from .infer_shape import ModuleMasks

replace_module = {
'BatchNorm2d': lambda module, mask: replace_batchnorm2d(module, mask),
Expand Down
2 changes: 1 addition & 1 deletion src/sdk/pynni/nni/compression/speedup/torch/compressor.py
Original file line number Diff line number Diff line change
Expand Up @@ -379,7 +379,7 @@ def _find_successors(self, module_name):
def infer_module_mask(self, module_name, mask=None, in_shape=None, out_shape=None):
"""
Infer input shape / output shape based on the module's weight mask / input shape / output shape.

For a module:
Infer its input and output shape from its weight mask
Infer its output shape from its input shape
Expand Down
11 changes: 5 additions & 6 deletions src/sdk/pynni/nni/compression/speedup/torch/infer_shape.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ def merge_index(index_a, index_b):
s.add(num)
for num in index_b:
s.add(num)
return torch.tensor(sorted(s))
return torch.tensor(sorted(s)) # pylint: disable=not-callable

def merge(self, cmask):
"""
Expand Down Expand Up @@ -98,7 +98,7 @@ def __init__(self, module_name):
self.param_masks = dict()
self.input_mask = None
self.output_mask = None

def set_param_masks(self, name, mask):
"""
Parameters
Expand Down Expand Up @@ -217,7 +217,7 @@ def view_inshape(module_masks, mask, shape):

TODO: consider replace tensor.view with nn.Flatten, because tensor.view is not
included in module, thus, cannot be replaced by our framework.

Parameters
----------
module_masks : ModuleMasks
Expand Down Expand Up @@ -250,7 +250,7 @@ def view_inshape(module_masks, mask, shape):
step_size = shape['in_shape'][2] * shape['in_shape'][3]
for loc in mask.mask_index[1]:
index.extend([loc * step_size + i for i in range(step_size)])
output_cmask.add_index_mask(dim=1, index=torch.tensor(index))
output_cmask.add_index_mask(dim=1, index=torch.tensor(index)) # pylint: disable=not-callable
module_masks.set_output_mask(output_cmask)
return output_cmask

Expand Down Expand Up @@ -373,7 +373,6 @@ def convert_to_coarse_mask(mask):
"""
assert 'weight' in mask
assert isinstance(mask['weight'], torch.Tensor)
cmask = None
weight_mask = mask['weight']
shape = weight_mask.size()
ones = torch.ones(shape[1:]).to(weight_mask.device)
Expand Down Expand Up @@ -451,7 +450,7 @@ def conv2d_outshape(module_masks, mask):
The ModuleMasks instance of the conv2d
mask : CoarseMask
The mask of its output tensor

Returns
-------
CoarseMask
Expand Down