diff --git a/FasterRCNNDetection/model/roi_module.py b/FasterRCNNDetection/model/roi_module.py
index 6b45f89..4536881 100644
--- a/FasterRCNNDetection/model/roi_module.py
+++ b/FasterRCNNDetection/model/roi_module.py
@@ -11,7 +11,7 @@
 Stream = namedtuple('Stream', ['ptr'])
 
 
-@cupy.util.memoize(for_each_device=True)
+@cupy.memoize(for_each_device=True)
 def load_kernel(kernel_name, code, **kwargs):
     cp.cuda.runtime.free(0)
     code = Template(code).substitute(**kwargs)
diff --git a/FasterRCNNDetection/model/utils/nms/non_maximum_suppression.py b/FasterRCNNDetection/model/utils/nms/non_maximum_suppression.py
index c488b52..beb5e43 100644
--- a/FasterRCNNDetection/model/utils/nms/non_maximum_suppression.py
+++ b/FasterRCNNDetection/model/utils/nms/non_maximum_suppression.py
@@ -8,12 +8,12 @@
     import warnings
     warnings.warn('''
     the python code for non_maximum_suppression is about 2x slow
-    It is strongly recommended to build cython code: 
+    It is strongly recommended to build cython code:
     `cd model/utils/nms/; python3 build.py build_ext --inplace''')
     from ._nms_gpu_post_py import _nms_gpu_post
 
 
-@cp.util.memoize(for_each_device=True)
+@cp.memoize(for_each_device=True)
 def _load_kernel(kernel_name, code, options=()):
     cp.cuda.runtime.free(0)
     assert isinstance(options, tuple)
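
Context for the change (not part of the patch itself): both hunks swap the deprecated `cupy.util.memoize` decorator for `cupy.memoize`, which takes the same `for_each_device` argument and caches results per CUDA device. Below is a minimal sketch of the replacement API; the function name and the RawModule compile step are illustrative stand-ins, not code from the patched files.

# Minimal sketch (assumed usage, not from the patched files): cupy.memoize
# replaces the removed cupy.util.memoize and keeps the for_each_device flag.
import cupy
from string import Template

@cupy.memoize(for_each_device=True)
def load_demo_kernel(kernel_name, code, **kwargs):
    # Fill in template parameters, compile once per device, and return the
    # named __global__ function; repeated calls hit the memoize cache.
    source = Template(code).substitute(**kwargs)
    module = cupy.RawModule(code=source)  # illustrative compile step
    return module.get_function(kernel_name)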