diff --git a/api/src/main/java/ai/djl/modality/cv/BufferedImageFactory.java b/api/src/main/java/ai/djl/modality/cv/BufferedImageFactory.java index d850b08a143..40735ddeca7 100644 --- a/api/src/main/java/ai/djl/modality/cv/BufferedImageFactory.java +++ b/api/src/main/java/ai/djl/modality/cv/BufferedImageFactory.java @@ -39,6 +39,8 @@ import java.nio.ByteBuffer; import java.nio.file.Path; import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; import javax.imageio.ImageIO; @@ -306,10 +308,18 @@ public void drawBoundingBoxes(DetectedObjects detections) { int imageHeight = image.getHeight(); List list = detections.items(); + int k = 10; + Map classNumberTable = new ConcurrentHashMap<>(); for (DetectedObjects.DetectedObject result : list) { String className = result.getClassName(); BoundingBox box = result.getBoundingBox(); - g.setPaint(randomColor().darker()); + if (classNumberTable.containsKey(className)) { + g.setPaint(new Color(classNumberTable.get(className))); + } else { + g.setPaint(new Color(k)); + classNumberTable.put(className, k); + k = (k + 100) % 255; + } Rectangle rectangle = box.getBounds(); int x = (int) (rectangle.getX() * imageWidth); diff --git a/api/src/main/java/ai/djl/modality/cv/transform/OneHot.java b/api/src/main/java/ai/djl/modality/cv/transform/OneHot.java index 0e99c4603d8..e80152ae660 100644 --- a/api/src/main/java/ai/djl/modality/cv/transform/OneHot.java +++ b/api/src/main/java/ai/djl/modality/cv/transform/OneHot.java @@ -21,7 +21,7 @@ public class OneHot implements Transform { private int numClass; /** - * Creates a {@code toOneHot} {@link Transform} that converts the sparse label to one-hot label. + * Creates a {@code OneHot} {@link Transform} that converts the sparse label to one-hot label. 
* * @param numClass number of classes */ diff --git a/api/src/main/java/ai/djl/modality/cv/translator/ObjectDetectionTranslator.java b/api/src/main/java/ai/djl/modality/cv/translator/ObjectDetectionTranslator.java index b0f21bb69f2..1031cb182a5 100644 --- a/api/src/main/java/ai/djl/modality/cv/translator/ObjectDetectionTranslator.java +++ b/api/src/main/java/ai/djl/modality/cv/translator/ObjectDetectionTranslator.java @@ -133,6 +133,7 @@ protected void configPostProcess(Map arguments) { if (ArgumentsUtil.booleanValue(arguments, "rescale")) { optRescaleSize(width, height); } + optApplyRatio(ArgumentsUtil.booleanValue(arguments, "optApplyRatio")); threshold = ArgumentsUtil.floatValue(arguments, "threshold", 0.2f); } } diff --git a/examples/build.gradle b/examples/build.gradle index 67e84f09044..52b20567fb0 100644 --- a/examples/build.gradle +++ b/examples/build.gradle @@ -32,6 +32,7 @@ dependencies { runtimeOnly project(":engines:pytorch:pytorch-model-zoo") runtimeOnly project(":engines:tensorflow:tensorflow-model-zoo") runtimeOnly project(":engines:mxnet:mxnet-model-zoo") + runtimeOnly project(":engines:onnxruntime:onnxruntime-engine") testImplementation("org.testng:testng:${testng_version}") { exclude group: "junit", module: "junit" diff --git a/examples/docs/mask_detection.md b/examples/docs/mask_detection.md new file mode 100644 index 00000000000..0cb92115acf --- /dev/null +++ b/examples/docs/mask_detection.md @@ -0,0 +1,49 @@ +# Mask detection with YOLOv5 - training and inference + +YOLOv5 is a powerful model for object detection tasks. With the transfer learning technique, a pre-trained YOLOv5 model can be utilized in various customized object detection tasks with relatively small dataset. + +In this example, we apply it on the [Face Mask Detection dataset](https://www.kaggle.com/datasets/andrewmvd/face-mask-detection?select=images). We first train the YOLOv5s model in Python, with the help of [ATLearn](), a python transfer learning toolkit. 
+Then, the model is saved as an ONNX model, which is then imported into DJL for inference. We apply it on the mask wearing detection task. The source code can be found at [MaskDetection.java](https://github.com/deepjavalibrary/djl/blob/master/examples/src/main/java/ai/djl/examples/inference/MaskDetection.java)
+
+## The training part in ATLearn
+
+We initially attempted to import a pretrained YOLOv5 into DJL, and fine-tune it with the [Face Mask Detection dataset](https://www.kaggle.com/datasets/andrewmvd/face-mask-detection?select=images), similar to [Train ResNet for Fruit Freshness Classification](./train_transfer_fresh_fruit.md). However, YOLOv5 cannot be converted to a PyTorch traced model, due to its data-dependent execution flow (see this [discussion](https://discuss.pytorch.org/t/yolov5-convert-to-torchscript/150180)), which blocks the idea of retraining a YOLOv5 model in DJL. So the training part is entirely in Python.
+
+The retraining of YOLOv5 can be found in an example in ATLearn: `examples/docs/face_mask_detection.md`. In this example, the YOLOv5 layers near the input are frozen while those near the output are fine-tuned with the customized data. This follows the transfer learning idea.
+
+In this example, the trained model is exported to an ONNX file and is then also used for inference in Python, which will serve as a benchmark.
+
+## Setup guide
+
+To configure your development environment, follow [setup](../../docs/development/setup.md).
+
+## Run mask detection example
+
+### Input image file
+We use the following image as input:
+
+![mask](https://resources.djl.ai/images/face_mask_detection/face_mask.png)
+
+### Build the project and run
+Use the following command to run the project:
+
+```
+cd examples
+./gradlew run -Dmain=ai.djl.examples.inference.MaskDetection
+```
+
+Your output should look like the following:
+
+```text
+[INFO ] - Detected objects image has been saved in: build/output/face_mask_result.png
+[INFO ] - {
+	"w/o mask": 0.8998132944107056,
+	"w/ mask": 0.8930246829986572,
+	"w/ mask": 0.8708265423774719,
+	...
+}
+```
+
+An output image with bounding box will be saved as `build/output/face_mask_result.png`:
+
+![detected-result](https://resources.djl.ai/images/face_mask_detection/face_mask_result.png)
diff --git a/examples/docs/object_detection.md b/examples/docs/object_detection.md
index 8f01702733b..f73cea26871 100644
--- a/examples/docs/object_detection.md
+++ b/examples/docs/object_detection.md
@@ -43,3 +43,21 @@ Your output should look like the following:
 An output image with bounding box will be saved as build/output/detected-dog_bike_car.png:
 
 ![detected-dogs](img/detected-dog_bike_car.png)
+
+## Run object detection example with other engines
+For object detection applications, other than the default model zoo with the default engine,
+we can also run it with other engines and model zoos. Here, we demonstrate with a pre-trained *YOLOv5s ONNX* model.
+
+The model can be easily loaded with the following criteria:
+
+```java
+Criteria<Image, DetectedObjects> criteria =
+        Criteria.builder()
+                .optApplication(Application.CV.OBJECT_DETECTION)
+                .setTypes(Image.class, DetectedObjects.class)
+                .optEngine("OnnxRuntime")
+                .optProgress(new ProgressBar())
+                .build();
+```
+
+where the `optFilter` is removed and `optEngine` is specified. The rest would be the same.
diff --git a/examples/docs/train_transfer_fresh_fruit.md b/examples/docs/train_transfer_fresh_fruit.md
index 1d6e7823703..7813968fbea 100644
--- a/examples/docs/train_transfer_fresh_fruit.md
+++ b/examples/docs/train_transfer_fresh_fruit.md
@@ -1,4 +1,4 @@
-# Blazing fast training with small dataset for Java applications
+# Train ResNet for Fruit Freshness Classification
 
 Deep learning has shown its strong power in solving problems in various
 areas like CV, NLP, reinforcement learning, etc., which generates numerous
 examples of successful applications.
@@ -205,7 +205,7 @@ Here, the data are preprocessed with the normalization and randomization functio
 commonly used for [image classification](https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html).
 The randomization are for training only.
 
-**Model training and export. ** Finally, we can run the model training with `Easytrain.fit`,
+**Model training and export.** Finally, we can run the model training with `Easytrain.fit`,
 and save the model for prediction. In the end, the `model.close()` and `embedding.close()`
 are called. In DJL, during the creation of `Model` and `ZooModel`, the native resources
 (e.g., memories in the assigned in PyTorch) are allocated. These resources are managed
diff --git a/examples/pom.xml b/examples/pom.xml
index e9786acba75..f27dbb2facb 100644
--- a/examples/pom.xml
+++ b/examples/pom.xml
@@ -78,6 +78,11 @@
 ai.djl.tensorflow
 tensorflow-model-zoo
 
+
+ ai.djl.onnxruntime
+ onnxruntime-engine
+
 org.testng
 testng
diff --git a/examples/src/main/java/ai/djl/examples/inference/MaskDetection.java b/examples/src/main/java/ai/djl/examples/inference/MaskDetection.java
new file mode 100644
index 00000000000..ab06b29ff31
--- /dev/null
+++ b/examples/src/main/java/ai/djl/examples/inference/MaskDetection.java
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance + * with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0/ + * + * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES + * OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ +package ai.djl.examples.inference; + +import ai.djl.ModelException; +import ai.djl.inference.Predictor; +import ai.djl.modality.cv.Image; +import ai.djl.modality.cv.ImageFactory; +import ai.djl.modality.cv.output.DetectedObjects; +import ai.djl.modality.cv.translator.YoloV5TranslatorFactory; +import ai.djl.repository.zoo.Criteria; +import ai.djl.repository.zoo.ZooModel; +import ai.djl.training.util.ProgressBar; +import ai.djl.translate.TranslateException; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; + +/** + * An example of inference using an object detection model. + * + *

See this doc + * for information about this example. + */ +public final class MaskDetection { + + private static final Logger logger = LoggerFactory.getLogger(MaskDetection.class); + + private MaskDetection() {} + + public static void main(String[] args) throws IOException, ModelException, TranslateException { + DetectedObjects detection = MaskDetection.predict(); + logger.info("{}", detection); + } + + public static DetectedObjects predict() throws IOException, ModelException, TranslateException { + // To feed in local image, use ImageFactory.getInstance().fromFile(...) + String imageUrl = "https://resources.djl.ai/images/face_mask_detection/face_mask.png"; + Image img = ImageFactory.getInstance().fromUrl(imageUrl); + + // modelUrl can be replaced to local onnx model file + String modelUrl = "https://resources.djl.ai/demo/onnxruntime/face_mask_detection.zip"; + Criteria criteria = + Criteria.builder() + .setTypes(Image.class, DetectedObjects.class) + .optModelUrls(modelUrl) + .optEngine("OnnxRuntime") + .optTranslatorFactory(new YoloV5TranslatorFactory()) + .optProgress(new ProgressBar()) + .optArgument("optApplyRatio", true) // post process + .optArgument("rescale", true) // post process + .build(); + + try (ZooModel model = criteria.loadModel()) { + try (Predictor predictor = model.newPredictor()) { + DetectedObjects detection = predictor.predict(img); + String outputDir = "build/output"; + saveBoundingBoxImage(img, detection, outputDir); + return detection; + } + } + } + + private static void saveBoundingBoxImage(Image img, DetectedObjects detection, String outputDir) + throws IOException { + Path outputPath = Paths.get(outputDir); + Files.createDirectories(outputPath); + + img.drawBoundingBoxes(detection); + + Path imagePath = outputPath.resolve("face_mask_result.png"); + // OpenJDK can't save jpg with alpha channel + img.save(Files.newOutputStream(imagePath), "png"); + logger.info("Detected objects image has been saved in: {}", imagePath); + } +} diff --git 
a/examples/src/test/java/ai/djl/examples/inference/MaskDetectionTest.java b/examples/src/test/java/ai/djl/examples/inference/MaskDetectionTest.java new file mode 100644 index 00000000000..db8f4fb74ba --- /dev/null +++ b/examples/src/test/java/ai/djl/examples/inference/MaskDetectionTest.java @@ -0,0 +1,48 @@ +/* + * Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance + * with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0/ + * + * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES + * OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + */ +package ai.djl.examples.inference; + +import ai.djl.ModelException; +import ai.djl.modality.Classifications; +import ai.djl.modality.cv.output.DetectedObjects; +import ai.djl.testing.TestRequirements; +import ai.djl.translate.TranslateException; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testng.Assert; +import org.testng.annotations.Test; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; + +public class MaskDetectionTest { + + private static final Logger logger = LoggerFactory.getLogger(MaskDetectionTest.class); + + @Test + public void testMaskDetection() throws ModelException, TranslateException, IOException { + TestRequirements.engine("OnnxRuntime"); + + DetectedObjects result = MaskDetection.predict(); + logger.info("{}", result); + + Assert.assertTrue(result.getNumberOfObjects() >= 8); + Classifications.Classification obj = result.best(); + String className = obj.getClassName(); + List objects = Arrays.asList("w/o mask", "w/ mask"); + Assert.assertTrue(objects.contains(className)); + Assert.assertTrue(obj.getProbability() 
> 0.8); + } +}