From 2f4b70bba214cf432179b24c6fd8cfe987e286f7 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 2 Dec 2021 15:45:41 +0100
Subject: [PATCH] Update TorchScript suffix to `*.torchscript`

---
 detect.py            |  8 ++++----
 export.py            |  6 +++---
 models/common.py     | 12 ++++++------
 utils/activations.py |  4 ++--
 val.py               | 10 +++++-----
 5 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/detect.py b/detect.py
index ecf868b5eaf4..0b6875e5564c 100644
--- a/detect.py
+++ b/detect.py
@@ -81,18 +81,18 @@ def run(weights=ROOT / 'yolov5s.pt',  # model.pt path(s)
     imgsz = check_img_size(imgsz, s=stride)  # check image size

     # Half
-    half &= (pt or engine) and device.type != 'cpu'  # half precision only supported by PyTorch on CUDA
-    if pt:
+    half &= (pt or jit or engine) and device.type != 'cpu'  # half precision only supported by PyTorch on CUDA
+    if pt or jit:
         model.model.half() if half else model.model.float()

     # Dataloader
     if webcam:
         view_img = check_imshow()
         cudnn.benchmark = True  # set True to speed up constant image size inference
-        dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt and not jit)
+        dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt)
         bs = len(dataset)  # batch_size
     else:
-        dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt and not jit)
+        dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt)
         bs = 1  # batch_size
     vid_path, vid_writer = [None] * bs, [None] * bs

diff --git a/export.py b/export.py
index b2f42142e16c..437616a9890d 100644
--- a/export.py
+++ b/export.py
@@ -5,7 +5,7 @@
 Format                  | Example                   | Export `include=(...)` argument
 ---                     | ---                       | ---
 PyTorch                 | yolov5s.pt                | -
-TorchScript             | yolov5s.torchscript.pt    | 'torchscript'
+TorchScript             | yolov5s.torchscript       | 'torchscript'
 ONNX                    | yolov5s.onnx              | 'onnx'
 CoreML                  | yolov5s.mlmodel           | 'coreml'
 TensorFlow SavedModel   | yolov5s_saved_model/      | 'saved_model'
@@ -19,7 +19,7 @@

 Inference:
     $ python path/to/detect.py --weights yolov5s.pt
-                                         yolov5s.torchscript.pt
+                                         yolov5s.torchscript
                                          yolov5s.onnx
                                          yolov5s.mlmodel  (under development)
                                          yolov5s_saved_model
@@ -66,7 +66,7 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')):
     # YOLOv5 TorchScript model export
     try:
         LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...')
-        f = file.with_suffix('.torchscript.pt')
+        f = file.with_suffix('.torchscript')

         ts = torch.jit.trace(model, im, strict=False)
         d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names}
diff --git a/models/common.py b/models/common.py
index cbd4ff479885..73f21729fa85 100644
--- a/models/common.py
+++ b/models/common.py
@@ -279,7 +279,7 @@ class DetectMultiBackend(nn.Module):
     def __init__(self, weights='yolov5s.pt', device=None, dnn=True):
         # Usage:
         #   PyTorch:      weights = *.pt
-        #   TorchScript:            *.torchscript.pt
+        #   TorchScript:            *.torchscript
         #   CoreML:                 *.mlmodel
         #   TensorFlow:             *_saved_model
         #   TensorFlow:             *.pb
@@ -289,10 +289,10 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=True):
         #   TensorRT:               *.engine
         super().__init__()
         w = str(weights[0] if isinstance(weights, list) else weights)
-        suffix, suffixes = Path(w).suffix.lower(), ['.pt', '.onnx', '.engine', '.tflite', '.pb', '', '.mlmodel']
+        suffix = Path(w).suffix.lower()
+        suffixes = ['.pt', '.torchscript', '.onnx', '.engine', '.tflite', '.pb', '', '.mlmodel']
         check_suffix(w, suffixes)  # check weights have acceptable suffix
-        pt, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes)  # backend booleans
-        jit = pt and 'torchscript' in w.lower()
+        pt, jit, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes)  # backend booleans
         stride, names = 64, [f'class{i}' for i in range(1000)]  # assign defaults
         if jit:  # TorchScript
@@ -304,10 +304,10 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=True):
                 stride, names = int(d['stride']), d['names']
         elif pt:  # PyTorch
             from models.experimental import attempt_load  # scoped to avoid circular import
-            model = torch.jit.load(w) if 'torchscript' in w else attempt_load(weights, map_location=device)
+            model = attempt_load(weights, map_location=device)
             stride = int(model.stride.max())  # model stride
             names = model.module.names if hasattr(model, 'module') else model.names  # get class names
-        elif coreml:  # CoreML *.mlmodel
+        elif coreml:  # CoreML
             import coremltools as ct
             model = ct.models.MLModel(w)
         elif dnn:  # ONNX OpenCV DNN
diff --git a/utils/activations.py b/utils/activations.py
index 4c7d46c32104..a4ff789cf336 100644
--- a/utils/activations.py
+++ b/utils/activations.py
@@ -18,8 +18,8 @@ def forward(x):
 class Hardswish(nn.Module):  # export-friendly version of nn.Hardswish()
     @staticmethod
     def forward(x):
-        # return x * F.hardsigmoid(x)  # for torchscript and CoreML
-        return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0  # for torchscript, CoreML and ONNX
+        # return x * F.hardsigmoid(x)  # for TorchScript and CoreML
+        return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0  # for TorchScript, CoreML and ONNX


 # Mish https://github.com/digantamisra98/Mish --------------------------------------------------------------------------
diff --git a/val.py b/val.py
index bd0ce9a7861d..27edd158a2f6 100644
--- a/val.py
+++ b/val.py
@@ -111,7 +111,7 @@ def run(data,
     # Initialize/load model and set device
     training = model is not None
     if training:  # called by train.py
-        device, pt, engine = next(model.parameters()).device, True, False  # get model device, PyTorch model
+        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model

         half &= device.type != 'cpu'  # half precision only supported on CUDA
         model.half() if half else model.float()
@@ -124,10 +124,10 @@ def run(data,

         # Load model
         model = DetectMultiBackend(weights, device=device, dnn=dnn)
-        stride, pt, engine = model.stride, model.pt, model.engine
+        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
         imgsz = check_img_size(imgsz, s=stride)  # check image size
-        half &= (pt or engine) and device.type != 'cpu'  # half precision only supported by PyTorch on CUDA
-        if pt:
+        half &= (pt or jit or engine) and device.type != 'cpu'  # half precision only supported by PyTorch on CUDA
+        if pt or jit:
             model.model.half() if half else model.model.float()
         elif engine:
             batch_size = model.batch_size
@@ -166,7 +166,7 @@ def run(data,
     pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
     for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
         t1 = time_sync()
-        if pt or engine:
+        if pt or jit or engine:
             im = im.to(device, non_blocking=True)
             targets = targets.to(device)
             im = im.half() if half else im.float()  # uint8 to fp16/32
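
For reference, below is a minimal, self-contained sketch (not part of the patch) of the suffix-to-backend dispatch that the models/common.py change introduces: the new '.torchscript' entry in the suffix list produces a dedicated `jit` boolean, replacing the old `'torchscript' in w` string check. The helper name `backend_flags` is hypothetical and the snippet deliberately omits any model loading:

from pathlib import Path

def backend_flags(weights):
    # Map a weights-file suffix to per-backend booleans, mirroring the
    # patched DetectMultiBackend.__init__ (illustrative sketch only).
    suffix = Path(weights).suffix.lower()
    suffixes = ['.pt', '.torchscript', '.onnx', '.engine', '.tflite', '.pb', '', '.mlmodel']
    pt, jit, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes)
    return {'pt': pt, 'jit': jit, 'onnx': onnx, 'engine': engine,
            'tflite': tflite, 'pb': pb, 'saved_model': saved_model, 'coreml': coreml}

print(backend_flags('yolov5s.torchscript'))  # jit=True, all others False
print(backend_flags('yolov5s.pt'))           # pt=True, all others False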